Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel...
authorDave Airlie <airlied@redhat.com>
Sun, 16 Dec 2012 06:05:03 +0000 (06:05 +0000)
committerDave Airlie <airlied@redhat.com>
Sun, 16 Dec 2012 06:05:03 +0000 (06:05 +0000)
Daniel writes:
A few leftover fixes for 3.8:
- VIC support for hdmi infoframes with the associated drm helper, fixes
  some black TVs (Paulo Zanoni)
- Modeset state check (and fixup if the BIOS messed with the hw) for
  lid-open. modeset-rework fallout. Somehow the original reporter went
  awol, so this stalled for way too long until we've found a new
  victim^Wreporter with broken BIOS.
- seqno wrap fixes from Mika and Chris.
- Some minor fixes all over from various people.
- Another race fix in the pageflip vs. unpin code from Chris.
- hsw vga resume support and a few more fdi link fixes (only used for vga
  on hsw) from Paulo.
- Regression fix for DMAR from Zhenyu Wang - I've scavenged memory from my
  DMAR for a while and it broke right away :(
- Regression fix from Takashi Iwai for ivb lvds - some w/a needs to be
  (partially) moved back into place. Note that these are regressions in
  -next.
- One more fix for ivb 3 pipe support - it now actually seems to work.

* 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel: (25 commits)
  drm/i915: Fix missed needs_dmar setting
  drm/i915: Fix shifted screen on top of LVDS on IVY laptop
  drm/i915: disable cpt phase pointer fdi rx workaround
  drm/i915: set the LPT FDI RX polarity reversal bit when needed
  drm/i915: add lpt_init_pch_refclk
  drm/i915: add support for mPHY destination on intel_sbi_{read, write}
  drm/i915: reject modes the LPT FDI receiver can't handle
  drm/i915: fix hsw_fdi_link_train "retry" code
  drm/i915: Close race between processing unpin task and queueing the flip
  drm/i915: fixup l3 parity sysfs access check
  drm/i915: Clear the existing watermarks for g4x when modifying the cursor sr
  drm/i915: do not access BLC_PWM_CTL2 on pre-gen4 hardware
  drm/i915: Don't allow ring tail to reach the same cacheline as head
  drm/i915: Decouple the object from the unbound list before freeing pages
  drm/i915: Set sync_seqno properly after seqno wrap
  drm/i915: Include the last semaphore sync point in the error-state
  drm/i915: Rearrange code to only have a single method for waiting upon the ring
  drm/i915: Simplify flushing activity on the ring
  drm/i915: Preallocate next seqno before touching the ring
  drm/i915: force restore on lid open
  ...

796 files changed:
CREDITS
Documentation/DMA-attributes.txt
Documentation/DocBook/drm.tmpl
Documentation/arm64/memory.txt
Documentation/cgroups/memory.txt
Documentation/devicetree/bindings/net/mdio-gpio.txt
Documentation/filesystems/proc.txt
Documentation/kref.txt
Documentation/networking/netdev-features.txt
Documentation/networking/vxlan.txt
MAINTAINERS
Makefile
arch/alpha/kernel/osf_sys.c
arch/arm/boot/Makefile
arch/arm/boot/dts/tegra30.dtsi
arch/arm/include/asm/io.h
arch/arm/include/asm/sched_clock.h
arch/arm/include/asm/vfpmacros.h
arch/arm/include/uapi/asm/hwcap.h
arch/arm/kernel/sched_clock.c
arch/arm/mach-at91/at91rm9200_devices.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-at91/at91sam9261_devices.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-davinci/dm644x.c
arch/arm/mach-exynos/dma.c
arch/arm/mach-exynos/include/mach/map.h
arch/arm/mach-highbank/system.c
arch/arm/mach-imx/clk-gate2.c
arch/arm/mach-imx/ehci-imx25.c
arch/arm/mach-imx/ehci-imx35.c
arch/arm/mach-omap2/board-igep0020.c
arch/arm/mach-omap2/clockdomains44xx_data.c
arch/arm/mach-omap2/common-board-devices.c
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/twl-common.c
arch/arm/mach-omap2/vc.c
arch/arm/mach-pxa/hx4700.c
arch/arm/mach-pxa/spitz_pm.c
arch/arm/mm/alignment.c
arch/arm/mm/dma-mapping.c
arch/arm/plat-omap/i2c.c
arch/arm/plat-omap/include/plat/omap_hwmod.h
arch/arm/tools/Makefile
arch/arm/vfp/vfpmodule.c
arch/arm/xen/enlighten.c
arch/arm64/Kconfig
arch/arm64/include/asm/elf.h
arch/arm64/include/asm/fpsimd.h
arch/arm64/include/asm/io.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/unistd.h
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/process.c
arch/arm64/kernel/smp.c
arch/arm64/mm/init.c
arch/h8300/include/asm/cache.h
arch/ia64/mm/init.c
arch/m68k/include/asm/signal.h
arch/mips/cavium-octeon/executive/cvmx-l2c.c
arch/mips/fw/arc/misc.c
arch/mips/include/asm/bitops.h
arch/mips/include/asm/compat.h
arch/mips/include/asm/io.h
arch/mips/include/asm/irqflags.h
arch/mips/include/asm/thread_info.h
arch/mips/kernel/setup.c
arch/mips/lib/Makefile
arch/mips/lib/bitops.c [new file with mode: 0644]
arch/mips/lib/mips-atomic.c [new file with mode: 0644]
arch/mips/mti-malta/malta-platform.c
arch/parisc/kernel/signal32.c
arch/parisc/kernel/sys_parisc.c
arch/powerpc/boot/dts/mpc5200b.dtsi
arch/powerpc/boot/dts/o2d.dtsi
arch/powerpc/boot/dts/pcm030.dts
arch/powerpc/platforms/52xx/mpc52xx_pic.c
arch/powerpc/platforms/pseries/eeh_pe.c
arch/powerpc/platforms/pseries/msi.c
arch/s390/Kconfig
arch/s390/include/asm/cio.h
arch/s390/include/asm/compat.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/topology.h
arch/s390/include/uapi/asm/ptrace.h
arch/s390/kernel/compat_signal.c
arch/s390/kernel/sclp.S
arch/s390/kernel/signal.c
arch/s390/kernel/topology.c
arch/s390/lib/uaccess_pt.c
arch/s390/mm/gup.c
arch/sparc/Kconfig
arch/sparc/crypto/Makefile
arch/sparc/crypto/aes_glue.c
arch/sparc/crypto/camellia_glue.c
arch/sparc/crypto/crc32c_glue.c
arch/sparc/crypto/des_glue.c
arch/sparc/crypto/md5_glue.c
arch/sparc/crypto/sha1_glue.c
arch/sparc/crypto/sha256_glue.c
arch/sparc/crypto/sha512_glue.c
arch/sparc/include/asm/atomic_64.h
arch/sparc/include/asm/backoff.h
arch/sparc/include/asm/compat.h
arch/sparc/include/asm/processor_64.h
arch/sparc/include/asm/prom.h
arch/sparc/include/asm/thread_info_64.h
arch/sparc/include/asm/ttable.h
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/entry.h
arch/sparc/kernel/leon_kernel.c
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/process_64.c
arch/sparc/kernel/ptrace_64.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/sparc/kernel/unaligned_64.c
arch/sparc/kernel/visemul.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/kernel/winfixup.S
arch/sparc/lib/atomic_64.S
arch/sparc/lib/ksyms.c
arch/sparc/math-emu/math_64.c
arch/unicore32/Kconfig
arch/unicore32/include/asm/Kbuild
arch/unicore32/include/asm/bug.h
arch/unicore32/include/asm/cmpxchg.h
arch/unicore32/include/asm/kvm_para.h [deleted file]
arch/unicore32/include/asm/processor.h
arch/unicore32/include/asm/ptrace.h
arch/unicore32/include/uapi/asm/Kbuild
arch/unicore32/include/uapi/asm/byteorder.h [moved from arch/unicore32/include/asm/byteorder.h with 100% similarity]
arch/unicore32/include/uapi/asm/ptrace.h [new file with mode: 0644]
arch/unicore32/include/uapi/asm/sigcontext.h [moved from arch/unicore32/include/asm/sigcontext.h with 100% similarity]
arch/unicore32/include/uapi/asm/unistd.h [moved from arch/unicore32/include/asm/unistd.h with 92% similarity]
arch/unicore32/kernel/entry.S
arch/unicore32/kernel/process.c
arch/unicore32/kernel/setup.h
arch/unicore32/kernel/sys.c
arch/unicore32/mm/fault.c
arch/x86/boot/compressed/eboot.c
arch/x86/boot/header.S
arch/x86/include/asm/ptrace.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/mce_intel.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/microcode_amd.c
arch/x86/kernel/ptrace.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/tlb.c
arch/x86/pci/ce4100.c
arch/x86/platform/ce4100/ce4100.c
block/blk-exec.c
crypto/cryptd.c
drivers/ata/ahci_platform.c
drivers/ata/libata-acpi.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/ata/pata_arasan_cf.c
drivers/ata/sata_highbank.c
drivers/ata/sata_svw.c
drivers/base/platform.c
drivers/base/power/qos.c
drivers/block/aoe/aoecmd.c
drivers/block/floppy.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/bus/omap-ocp2scp.c
drivers/clk/ux500/u8500_clk.c
drivers/edac/amd64_edac.h
drivers/edac/edac_stub.c
drivers/edac/mce_amd_inj.c
drivers/firewire/sbp2.c
drivers/gpio/Kconfig
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-mvebu.c
drivers/gpu/drm/Makefile
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_hashtab.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/Makefile
drivers/gpu/drm/exynos/exynos_ddc.c
drivers/gpu/drm/exynos/exynos_drm_buf.c
drivers/gpu/drm/exynos/exynos_drm_buf.h
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_encoder.c
drivers/gpu/drm/exynos/exynos_drm_encoder.h
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_fimc.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_gsc.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_gsc.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_hdmi.c
drivers/gpu/drm/exynos/exynos_drm_hdmi.h
drivers/gpu/drm/exynos/exynos_drm_iommu.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_iommu.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_ipp.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_ipp.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_rotator.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_hdmiphy.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/exynos/regs-fimc.h [new file with mode: 0644]
drivers/gpu/drm/exynos/regs-gsc.h [new file with mode: 0644]
drivers/gpu/drm/exynos/regs-hdmi.h
drivers/gpu/drm/exynos/regs-rotator.h [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_device.c
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/gma500/cdv_intel_hdmi.c
drivers/gpu/drm/gma500/cdv_intel_lvds.c
drivers/gpu/drm/gma500/mdfld_dsi_output.c
drivers/gpu/drm/gma500/mdfld_intel_display.c
drivers/gpu/drm/gma500/oaktrail_crtc.c
drivers/gpu/drm/gma500/oaktrail_lvds.c
drivers/gpu/drm/gma500/psb_intel_lvds.c
drivers/gpu/drm/gma500/psb_intel_sdvo.c
drivers/gpu/drm/i2c/ch7006_drv.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/nouveau/Makefile
drivers/gpu/drm/nouveau/core/core/engctx.c
drivers/gpu/drm/nouveau/core/core/falcon.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/core/gpuobj.c
drivers/gpu/drm/nouveau/core/core/mm.c
drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nv84.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nv94.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nva0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nva3.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
drivers/gpu/drm/nouveau/core/engine/disp/nve0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/fifo/base.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
drivers/gpu/drm/nouveau/core/engine/graph/regs.h
drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/software/nv04.c
drivers/gpu/drm/nouveau/core/engine/software/nv10.c
drivers/gpu/drm/nouveau/core/engine/software/nv50.c
drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/vp/nve0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/include/core/class.h
drivers/gpu/drm/nouveau/core/include/core/engctx.h
drivers/gpu/drm/nouveau/core/include/core/falcon.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
drivers/gpu/drm/nouveau/core/include/core/mm.h
drivers/gpu/drm/nouveau/core/include/core/object.h
drivers/gpu/drm/nouveau/core/include/core/parent.h
drivers/gpu/drm/nouveau/core/include/engine/bsp.h
drivers/gpu/drm/nouveau/core/include/engine/copy.h
drivers/gpu/drm/nouveau/core/include/engine/crypt.h
drivers/gpu/drm/nouveau/core/include/engine/disp.h
drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
drivers/gpu/drm/nouveau/core/include/engine/fifo.h
drivers/gpu/drm/nouveau/core/include/engine/ppp.h
drivers/gpu/drm/nouveau/core/include/engine/vp.h
drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
drivers/gpu/drm/nouveau/core/include/subdev/clock.h
drivers/gpu/drm/nouveau/core/include/subdev/fb.h
drivers/gpu/drm/nouveau/core/subdev/bar/base.c
drivers/gpu/drm/nouveau/core/subdev/bios/base.c
drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
drivers/gpu/drm/nouveau/core/subdev/bios/disp.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/device/base.c
drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
drivers/gpu/drm/nouveau/core/subdev/fb/base.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
drivers/gpu/drm/nouveau/core/subdev/mc/base.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_acpi.h
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bios.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_bo.h
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_connector.h
drivers/gpu/drm/nouveau/nouveau_crtc.h
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_dp.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drm.h
drivers/gpu/drm/nouveau/nouveau_encoder.h
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_hdmi.c [deleted file]
drivers/gpu/drm/nouveau/nouveau_irq.c
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/nouveau/nv04_crtc.c
drivers/gpu/drm/nouveau/nv04_display.c
drivers/gpu/drm/nouveau/nv10_fence.c
drivers/gpu/drm/nouveau/nv17_tv.c
drivers/gpu/drm/nouveau/nv50_crtc.c [deleted file]
drivers/gpu/drm/nouveau/nv50_cursor.c [deleted file]
drivers/gpu/drm/nouveau/nv50_dac.c [deleted file]
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv50_display.h
drivers/gpu/drm/nouveau/nv50_evo.c [deleted file]
drivers/gpu/drm/nouveau/nv50_evo.h [deleted file]
drivers/gpu/drm/nouveau/nv50_fence.c
drivers/gpu/drm/nouveau/nv50_pm.c
drivers/gpu/drm/nouveau/nv50_sor.c [deleted file]
drivers/gpu/drm/nouveau/nvc0_fence.c
drivers/gpu/drm/nouveau/nvd0_display.c [deleted file]
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600_reg.h
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_agp.c
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cp.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_drv.h
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770d.h
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/tegra/host1x.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/ttm/ttm_object.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/vmwgfx/Makefile
drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
drivers/gpu/drm/vmwgfx/vmwgfx_context.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c [new file with mode: 0644]
drivers/hid/hid-microsoft.c
drivers/hid/hidraw.c
drivers/hwmon/asb100.c
drivers/hwmon/w83627ehf.c
drivers/hwmon/w83627hf.c
drivers/hwmon/w83781d.c
drivers/hwmon/w83791d.c
drivers/hwmon/w83792d.c
drivers/hwmon/w83l786ng.c
drivers/i2c/busses/i2c-at91.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/muxes/i2c-mux-pinctrl.c
drivers/input/input-mt.c
drivers/input/mousedev.c
drivers/input/touchscreen/ads7846.c
drivers/iommu/intel-iommu.c
drivers/iommu/tegra-smmu.c
drivers/irqchip/irq-bcm2835.c
drivers/isdn/Kconfig
drivers/isdn/i4l/Kconfig
drivers/isdn/i4l/isdn_common.c
drivers/leds/ledtrig-cpu.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/mmc/host/dw_mmc-exynos.c
drivers/mmc/host/dw_mmc-pltfm.c
drivers/mmc/host/dw_mmc-pltfm.h
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-dove.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sdhci-pci.c
drivers/mmc/host/sdhci-pltfm.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mmc/host/sh_mmcif.c
drivers/mtd/devices/slram.c
drivers/mtd/nand/nand_base.c
drivers/mtd/ofpart.c
drivers/mtd/onenand/onenand_base.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/8390/ne.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ethernet/xscale/ixp4xx_eth.c
drivers/net/irda/sir_dev.c
drivers/net/phy/mdio-bitbang.c
drivers/net/phy/mdio-gpio.c
drivers/net/team/team_mode_broadcast.c
drivers/net/usb/cdc_eem.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/usbnet.c
drivers/net/vxlan.c
drivers/net/wan/ixp4xx_hss.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/b43legacy/pio.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/xen-netfront.c
drivers/nfc/pn533.c
drivers/pci/bus.c
drivers/pci/pci-driver.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/pcie/portdrv_core.c
drivers/pci/proc.c
drivers/pinctrl/Kconfig
drivers/pinctrl/spear/pinctrl-spear.c
drivers/pinctrl/spear/pinctrl-spear1310.c
drivers/pinctrl/spear/pinctrl-spear1340.c
drivers/pinctrl/spear/pinctrl-spear320.c
drivers/pinctrl/spear/pinctrl-spear3xx.h
drivers/rapidio/rio.c
drivers/regulator/core.c
drivers/s390/char/con3215.c
drivers/s390/cio/css.h
drivers/s390/cio/device.c
drivers/s390/cio/idset.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/scsi/isci/request.c
drivers/scsi/qlogicpti.c
drivers/scsi/scsi.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/staging/android/android_alarm.h
drivers/tty/hvc/hvc_console.c
drivers/tty/serial/max310x.c
drivers/usb/core/hcd.c
drivers/usb/early/ehci-dbgp.c
drivers/usb/gadget/u_ether.c
drivers/usb/host/ehci-ls1x.c
drivers/usb/host/ohci-xls.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/ux500.c
drivers/usb/otg/Kconfig
drivers/usb/serial/keyspan.c
drivers/usb/serial/option.c
drivers/usb/serial/usb_wwan.c
drivers/usb/storage/scsiglue.c
drivers/video/omap2/dss/dsi.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/omapfb/omapfb-ioctl.c
drivers/virtio/virtio.c
drivers/xen/Makefile
drivers/xen/events.c
drivers/xen/fallback.c [new file with mode: 0644]
drivers/xen/privcmd.c
fs/cifs/cifsacl.c
fs/cifs/dir.c
fs/eventpoll.c
fs/ext3/balloc.c
fs/file.c
fs/gfs2/file.c
fs/gfs2/lops.c
fs/gfs2/quota.c
fs/gfs2/rgrp.c
fs/gfs2/super.c
fs/gfs2/trans.c
fs/jffs2/file.c
fs/notify/fanotify/fanotify.c
fs/notify/fanotify/fanotify_user.c
fs/proc/base.c
fs/pstore/platform.c
fs/reiserfs/inode.c
fs/reiserfs/stree.c
fs/reiserfs/super.c
fs/ubifs/find.c
fs/ubifs/lprops.c
fs/ubifs/ubifs.h
fs/xfs/xfs_alloc.c
fs/xfs/xfs_alloc.h
fs/xfs/xfs_alloc_btree.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_bmap.h
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_ialloc.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_recover.c
include/drm/drm_crtc.h
include/drm/drm_dp_helper.h
include/drm/drm_hashtab.h
include/drm/drm_pciids.h
include/drm/exynos_drm.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/linux/clk-provider.h
include/linux/dma-attrs.h
include/linux/i2c-omap.h
include/linux/mm.h
include/linux/mmc/dw_mmc.h
include/linux/mmc/sdhci.h
include/linux/mmzone.h
include/linux/of_address.h
include/linux/platform_data/omap_ocp2scp.h [new file with mode: 0644]
include/linux/ptp_clock_kernel.h
include/linux/rio.h
include/linux/spi/ads7846.h
include/net/xfrm.h
include/scsi/scsi_device.h
include/uapi/drm/exynos_drm.h
include/uapi/drm/radeon_drm.h
include/uapi/linux/eventpoll.h
include/uapi/linux/oom.h
include/xen/hvm.h
kernel/futex.c
kernel/module.c
lib/mpi/longlong.h
mm/bootmem.c
mm/highmem.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mmap.c
mm/mmzone.c
mm/nobootmem.c
mm/page_alloc.c
mm/shmem.c
mm/swapfile.c
mm/vmscan.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/bluetooth/hci_core.c
net/bluetooth/mgmt.c
net/bluetooth/smp.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/net-sysfs.c
net/core/rtnetlink.c
net/ipv4/inet_diag.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_vti.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_output.c
net/ipv4/xfrm4_policy.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_gre.c
net/ipv6/ipv6_sockglue.c
net/ipv6/ndisc.c
net/mac80211/cfg.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/main.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/status.c
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/nfnetlink_cttimeout.c
net/nfc/llcp/llcp.c
net/sched/sch_qfq.c
net/sctp/proc.c
net/tipc/handler.c
net/wireless/reg.c
scripts/Makefile.modinst
scripts/checkpatch.pl
scripts/kconfig/expr.h
scripts/kconfig/list.h [new file with mode: 0644]
scripts/kconfig/lkc_proto.h
scripts/kconfig/mconf.c
scripts/kconfig/menu.c
scripts/sign-file
security/device_cgroup.c
security/selinux/netnode.c
sound/core/oss/mixer_oss.c
sound/core/oss/pcm_oss.c
sound/core/pcm_native.c
sound/core/sound.c
sound/core/sound_oss.c
sound/i2c/other/ak4113.c
sound/i2c/other/ak4114.c
sound/i2c/other/ak4117.c
sound/pci/es1968.c
sound/pci/fm801.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/pci/rme9652/hdspm.c
sound/soc/codecs/arizona.c
sound/soc/codecs/cs4271.c
sound/soc/codecs/cs42l52.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm8978.c
sound/soc/codecs/wm8994.c
sound/soc/kirkwood/kirkwood-dma.c
sound/soc/kirkwood/kirkwood-i2s.c
sound/soc/mxs/mxs-saif.c
sound/soc/samsung/Kconfig
sound/soc/samsung/bells.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/usb/card.c
sound/usb/endpoint.c
sound/usb/endpoint.h
sound/usb/midi.c
sound/usb/pcm.c
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/Makefile
tools/testing/selftests/epoll/Makefile [deleted file]
tools/testing/selftests/epoll/test_epoll.c [deleted file]

diff --git a/CREDITS b/CREDITS
index d8fe12a..2346b09 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -1823,6 +1823,11 @@ S: Kattreinstr 38
 S: D-64295
 S: Germany
 
+N: Avi Kivity
+E: avi.kivity@gmail.com
+D: Kernel-based Virtual Machine (KVM)
+S: Ra'annana, Israel
+
 N: Andi Kleen
 E: andi@firstfloor.org
 U: http://www.halobates.de
index f503090..e59480d 100644 (file)
@@ -91,3 +91,12 @@ transferred to 'device' domain. This attribute can be also used for
 dma_unmap_{single,page,sg} functions family to force buffer to stay in
 device domain after releasing a mapping for it. Use this attribute with
 care!
+
+DMA_ATTR_FORCE_CONTIGUOUS
+-------------------------
+
+By default DMA-mapping subsystem is allowed to assemble the buffer
+allocated by dma_alloc_attrs() function from individual pages if it can
+be mapped as contiguous chunk into device dma address space. By
+specifing this attribute the allocated buffer is forced to be contiguous
+also in physical memory.
index c9cbb3f..4ee2304 100644 (file)
@@ -1611,10 +1611,10 @@ void intel_crt_init(struct drm_device *dev)
     </sect2>
   </sect1>
 
-  <!-- Internals: mid-layer helper functions -->
+  <!-- Internals: kms helper functions -->
 
   <sect1>
-    <title>Mid-layer Helper Functions</title>
+    <title>Mode Setting Helper Functions</title>
     <para>
       The CRTC, encoder and connector functions provided by the drivers
       implement the DRM API. They're called by the DRM core and ioctl handlers
@@ -2096,6 +2096,21 @@ void intel_crt_init(struct drm_device *dev)
         </listitem>
       </itemizedlist>
     </sect2>
+    <sect2>
+      <title>Modeset Helper Functions Reference</title>
+!Edrivers/gpu/drm/drm_crtc_helper.c
+    </sect2>
+    <sect2>
+      <title>fbdev Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
+!Edrivers/gpu/drm/drm_fb_helper.c
+    </sect2>
+    <sect2>
+      <title>Display Port Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
+!Iinclude/drm/drm_dp_helper.h
+!Edrivers/gpu/drm/drm_dp_helper.c
+    </sect2>
   </sect1>
 
   <!-- Internals: vertical blanking -->
index dbbdcbb..4110cca 100644 (file)
@@ -27,17 +27,17 @@ Start                       End                     Size            Use
 -----------------------------------------------------------------------
 0000000000000000       0000007fffffffff         512GB          user
 
-ffffff8000000000       ffffffbbfffcffff        ~240GB          vmalloc
+ffffff8000000000       ffffffbbfffeffff        ~240GB          vmalloc
 
-ffffffbbfffd0000       ffffffbcfffdffff          64KB          [guard page]
+ffffffbbffff0000       ffffffbbffffffff          64KB          [guard page]
 
-ffffffbbfffe0000       ffffffbcfffeffff          64KB          PCI I/O space
+ffffffbc00000000       ffffffbdffffffff           8GB          vmemmap
 
-ffffffbbffff0000       ffffffbcffffffff          64KB          [guard page]
+ffffffbe00000000       ffffffbffbbfffff          ~8GB          [guard, future vmemmap]
 
-ffffffbc00000000       ffffffbdffffffff           8GB          vmemmap
+ffffffbffbe00000       ffffffbffbe0ffff          64KB          PCI I/O space
 
-ffffffbe00000000       ffffffbffbffffff          ~8GB          [guard, future vmmemap]
+ffffffbbffff0000       ffffffbcffffffff          ~2MB          [guard]
 
 ffffffbffc000000       ffffffbfffffffff          64MB          modules
 
index c07f7b4..71c4da4 100644 (file)
@@ -466,6 +466,10 @@ Note:
 5.3 swappiness
 
 Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
+Please note that unlike the global swappiness, memcg knob set to 0
+really prevents any swapping even if there is swap storage
+available. This might lead to memcg OOM killer if there are no file
+pages to reclaim.
 
 Following cgroups' swappiness can't be changed.
 - root cgroup (uses /proc/sys/vm/swappiness).
index bc95495..c79bab0 100644 (file)
@@ -8,9 +8,16 @@ gpios property as described in section VIII.1 in the following order:
 
 MDC, MDIO.
 
+Note: Each gpio-mdio bus should have an alias correctly numbered in "aliases"
+node.
+
 Example:
 
-mdio {
+aliases {
+       mdio-gpio0 = <&mdio0>;
+};
+
+mdio0: mdio {
        compatible = "virtual,mdio-gpio";
        #address-cells = <1>;
        #size-cells = <0>;
index a1793d6..3844d21 100644 (file)
@@ -33,7 +33,7 @@ Table of Contents
   2    Modifying System Parameters
 
   3    Per-Process Parameters
-  3.1  /proc/<pid>/oom_score_adj - Adjust the oom-killer
+  3.1  /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj - Adjust the oom-killer
                                                                score
   3.2  /proc/<pid>/oom_score - Display current oom-killer score
   3.3  /proc/<pid>/io - Display the IO accounting fields
@@ -1320,10 +1320,10 @@ of the kernel.
 CHAPTER 3: PER-PROCESS PARAMETERS
 ------------------------------------------------------------------------------
 
-3.1 /proc/<pid>/oom_score_adj- Adjust the oom-killer score
+3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj- Adjust the oom-killer score
 --------------------------------------------------------------------------------
 
-This file can be used to adjust the badness heuristic used to select which
+These files can be used to adjust the badness heuristic used to select which
 process gets killed in out of memory conditions.
 
 The badness heuristic assigns a value to each candidate task ranging from 0
@@ -1361,6 +1361,12 @@ same system, cpuset, mempolicy, or memory controller resources to use at least
 equivalent to discounting 50% of the task's allowed memory from being considered
 as scoring against the task.
 
+For backwards compatibility with previous kernels, /proc/<pid>/oom_adj may also
+be used to tune the badness score.  Its acceptable values range from -16
+(OOM_ADJUST_MIN) to +15 (OOM_ADJUST_MAX) and a special value of -17
+(OOM_DISABLE) to disable oom killing entirely for that task.  Its value is
+scaled linearly with /proc/<pid>/oom_score_adj.
+
 The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last
 value set by a CAP_SYS_RESOURCE process. To reduce the value any lower
 requires CAP_SYS_RESOURCE.
@@ -1375,7 +1381,9 @@ minimal amount of work.
 -------------------------------------------------------------
 
 This file can be used to check the current score used by the oom-killer is for
-any given <pid>.
+any given <pid>. Use it together with /proc/<pid>/oom_score_adj to tune which
+process should be killed in an out-of-memory situation.
+
 
 3.3  /proc/<pid>/io - Display the IO accounting fields
 -------------------------------------------------------
index 48ba715..ddf85a5 100644 (file)
@@ -213,3 +213,91 @@ presentation on krefs, which can be found at:
 and:
   http://www.kroah.com/linux/talks/ols_2004_kref_talk/
 
+
+The above example could also be optimized using kref_get_unless_zero() in
+the following way:
+
+static struct my_data *get_entry()
+{
+       struct my_data *entry = NULL;
+       mutex_lock(&mutex);
+       if (!list_empty(&q)) {
+               entry = container_of(q.next, struct my_data, link);
+               if (!kref_get_unless_zero(&entry->refcount))
+                       entry = NULL;
+       }
+       mutex_unlock(&mutex);
+       return entry;
+}
+
+static void release_entry(struct kref *ref)
+{
+       struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+       mutex_lock(&mutex);
+       list_del(&entry->link);
+       mutex_unlock(&mutex);
+       kfree(entry);
+}
+
+static void put_entry(struct my_data *entry)
+{
+       kref_put(&entry->refcount, release_entry);
+}
+
+Which is useful to remove the mutex lock around kref_put() in put_entry(), but
+it's important that kref_get_unless_zero is enclosed in the same critical
+section that finds the entry in the lookup table,
+otherwise kref_get_unless_zero may reference already freed memory.
+Note that it is illegal to use kref_get_unless_zero without checking its
+return value. If you are sure (by already having a valid pointer) that
+kref_get_unless_zero() will return true, then use kref_get() instead.
+
+The function kref_get_unless_zero also makes it possible to use rcu
+locking for lookups in the above example:
+
+struct my_data
+{
+       struct rcu_head rhead;
+       .
+       struct kref refcount;
+       .
+       .
+};
+
+static struct my_data *get_entry_rcu()
+{
+       struct my_data *entry = NULL;
+       rcu_read_lock();
+       if (!list_empty(&q)) {
+               entry = container_of(q.next, struct my_data, link);
+               if (!kref_get_unless_zero(&entry->refcount))
+                       entry = NULL;
+       }
+       rcu_read_unlock();
+       return entry;
+}
+
+static void release_entry_rcu(struct kref *ref)
+{
+       struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+       mutex_lock(&mutex);
+       list_del_rcu(&entry->link);
+       mutex_unlock(&mutex);
+       kfree_rcu(entry, rhead);
+}
+
+static void put_entry(struct my_data *entry)
+{
+       kref_put(&entry->refcount, release_entry_rcu);
+}
+
+But note that the struct kref member needs to remain in valid memory for a
+rcu grace period after release_entry_rcu was called. That can be accomplished
+by using kfree_rcu(entry, rhead) as done above, or by calling synchronize_rcu()
+before using kfree, but note that synchronize_rcu() may sleep for a
+substantial amount of time.
+
+
+Thomas Hellstrom <thellstrom@vmware.com>
index 4164f5c..f310ede 100644 (file)
@@ -164,4 +164,4 @@ read the CRC recorded by the NIC on receipt of the packet.
 This requests that the NIC receive all possible frames, including errored
 frames (such as bad FCS, etc).  This can be helpful when sniffing a link with
 bad packets on it.  Some NICs may receive more packets if also put into normal
-PROMISC mdoe.
+PROMISC mode.
index 5b34b76..6d99351 100644 (file)
@@ -32,7 +32,7 @@ no entry is in the forwarding table.
   # ip link delete vxlan0
 
 3. Show vxlan info
-  # ip -d show vxlan0
+  # ip -d link show vxlan0
 
 It is possible to create, destroy and display the vxlan
 forwarding table using the new bridge command.
@@ -41,7 +41,7 @@ forwarding table using the new bridge command.
   # bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0
 
 2. Delete forwarding table entry
-  # bridge fdb delete 00:17:42:8a:b4:05
+  # bridge fdb delete 00:17:42:8a:b4:05 dev vxlan0
 
 3. Show forwarding table
   # bridge fdb show dev vxlan0
index 59203e7..8e196d7 100644 (file)
@@ -526,17 +526,17 @@ F:        drivers/video/geode/
 F:     arch/x86/include/asm/geode.h
 
 AMD IOMMU (AMD-VI)
-M:     Joerg Roedel <joerg.roedel@amd.com>
+M:     Joerg Roedel <joro@8bytes.org>
 L:     iommu@lists.linux-foundation.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
-S:     Supported
+S:     Maintained
 F:     drivers/iommu/amd_iommu*.[ch]
 F:     include/linux/amd-iommu.h
 
 AMD MICROCODE UPDATE SUPPORT
-M:     Andreas Herrmann <andreas.herrmann3@amd.com>
+M:     Andreas Herrmann <herrmann.der.user@googlemail.com>
 L:     amd64-microcode@amd64.org
-S:     Supported
+S:     Maintained
 F:     arch/x86/kernel/microcode_amd.c
 
 AMS (Apple Motion Sensor) DRIVER
@@ -841,6 +841,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
 F:     arch/arm/mach-sa1100/jornada720.c
 F:     arch/arm/mach-sa1100/include/mach/jornada720.h
 
+ARM/IGEP MACHINE SUPPORT
+M:     Enric Balletbo i Serra <eballetbo@gmail.com>
+M:     Javier Martinez Canillas <javier@dowhile0.org>
+L:     linux-omap@vger.kernel.org
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/mach-omap2/board-igep0020.c
+
 ARM/INCOME PXA270 SUPPORT
 M:     Marek Vasut <marek.vasut@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2512,6 +2520,15 @@ S:       Supported
 F:     drivers/gpu/drm/exynos
 F:     include/drm/exynos*
 
+DRM DRIVERS FOR NVIDIA TEGRA
+M:     Thierry Reding <thierry.reding@avionic-design.de>
+L:     dri-devel@lists.freedesktop.org
+L:     linux-tegra@vger.kernel.org
+T:     git git://gitorious.org/thierryreding/linux.git
+S:     Maintained
+F:     drivers/gpu/drm/tegra/
+F:     Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+
 DSCC4 DRIVER
 M:     Francois Romieu <romieu@fr.zoreil.com>
 L:     netdev@vger.kernel.org
@@ -2708,10 +2725,10 @@ F:      include/linux/edac.h
 
 EDAC-AMD64
 M:     Doug Thompson <dougthompson@xmission.com>
-M:     Borislav Petkov <borislav.petkov@amd.com>
+M:     Borislav Petkov <bp@alien8.de>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
-S:     Supported
+S:     Maintained
 F:     drivers/edac/amd64_edac*
 
 EDAC-E752X
@@ -3598,6 +3615,49 @@ F:       drivers/hid/hid-hyperv.c
 F:     drivers/net/hyperv/
 F:     drivers/staging/hv/
 
+I2C OVER PARALLEL PORT
+M:     Jean Delvare <khali@linux-fr.org>
+L:     linux-i2c@vger.kernel.org
+S:     Maintained
+F:     Documentation/i2c/busses/i2c-parport
+F:     Documentation/i2c/busses/i2c-parport-light
+F:     drivers/i2c/busses/i2c-parport.c
+F:     drivers/i2c/busses/i2c-parport-light.c
+
+I2C/SMBUS CONTROLLER DRIVERS FOR PC
+M:     Jean Delvare <khali@linux-fr.org>
+L:     linux-i2c@vger.kernel.org
+S:     Maintained
+F:     Documentation/i2c/busses/i2c-ali1535
+F:     Documentation/i2c/busses/i2c-ali1563
+F:     Documentation/i2c/busses/i2c-ali15x3
+F:     Documentation/i2c/busses/i2c-amd756
+F:     Documentation/i2c/busses/i2c-amd8111
+F:     Documentation/i2c/busses/i2c-i801
+F:     Documentation/i2c/busses/i2c-nforce2
+F:     Documentation/i2c/busses/i2c-piix4
+F:     Documentation/i2c/busses/i2c-sis5595
+F:     Documentation/i2c/busses/i2c-sis630
+F:     Documentation/i2c/busses/i2c-sis96x
+F:     Documentation/i2c/busses/i2c-via
+F:     Documentation/i2c/busses/i2c-viapro
+F:     drivers/i2c/busses/i2c-ali1535.c
+F:     drivers/i2c/busses/i2c-ali1563.c
+F:     drivers/i2c/busses/i2c-ali15x3.c
+F:     drivers/i2c/busses/i2c-amd756.c
+F:     drivers/i2c/busses/i2c-amd756-s4882.c
+F:     drivers/i2c/busses/i2c-amd8111.c
+F:     drivers/i2c/busses/i2c-i801.c
+F:     drivers/i2c/busses/i2c-isch.c
+F:     drivers/i2c/busses/i2c-nforce2.c
+F:     drivers/i2c/busses/i2c-nforce2-s4985.c
+F:     drivers/i2c/busses/i2c-piix4.c
+F:     drivers/i2c/busses/i2c-sis5595.c
+F:     drivers/i2c/busses/i2c-sis630.c
+F:     drivers/i2c/busses/i2c-sis96x.c
+F:     drivers/i2c/busses/i2c-via.c
+F:     drivers/i2c/busses/i2c-viapro.c
+
 I2C/SMBUS STUB DRIVER
 M:     "Mark M. Hoffman" <mhoffman@lightlink.com>
 L:     linux-i2c@vger.kernel.org
@@ -3605,9 +3665,8 @@ S:        Maintained
 F:     drivers/i2c/busses/i2c-stub.c
 
 I2C SUBSYSTEM
-M:     "Jean Delvare (PC drivers, core)" <khali@linux-fr.org>
+M:     Wolfram Sang <w.sang@pengutronix.de>
 M:     "Ben Dooks (embedded platforms)" <ben-linux@fluff.org>
-M:     "Wolfram Sang (embedded platforms)" <w.sang@pengutronix.de>
 L:     linux-i2c@vger.kernel.org
 W:     http://i2c.wiki.kernel.org/
 T:     quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/
@@ -3618,6 +3677,13 @@ F:       drivers/i2c/
 F:     include/linux/i2c.h
 F:     include/linux/i2c-*.h
 
+I2C-TAOS-EVM DRIVER
+M:     Jean Delvare <khali@linux-fr.org>
+L:     linux-i2c@vger.kernel.org
+S:     Maintained
+F:     Documentation/i2c/busses/i2c-taos-evm
+F:     drivers/i2c/busses/i2c-taos-evm.c
+
 I2C-TINY-USB DRIVER
 M:     Till Harbaum <till@harbaum.org>
 L:     linux-i2c@vger.kernel.org
@@ -3704,7 +3770,7 @@ S:        Maintained
 F:     drivers/platform/x86/ideapad-laptop.c
 
 IDE/ATAPI DRIVERS
-M:     Borislav Petkov <petkovbb@gmail.com>
+M:     Borislav Petkov <bp@alien8.de>
 L:     linux-ide@vger.kernel.org
 S:     Maintained
 F:     Documentation/cdrom/ide-cd
@@ -4231,8 +4297,8 @@ F:        include/linux/lockd/
 F:     include/linux/sunrpc/
 
 KERNEL VIRTUAL MACHINE (KVM)
-M:     Avi Kivity <avi@redhat.com>
 M:     Marcelo Tosatti <mtosatti@redhat.com>
+M:     Gleb Natapov <gleb@redhat.com>
 L:     kvm@vger.kernel.org
 W:     http://kvm.qumranet.com
 S:     Supported
@@ -5364,7 +5430,7 @@ S:        Maintained
 F:     sound/drivers/opl4/
 
 OPROFILE
-M:     Robert Richter <robert.richter@amd.com>
+M:     Robert Richter <rric@kernel.org>
 L:     oprofile-list@lists.sf.net
 S:     Maintained
 F:     arch/*/include/asm/oprofile*.h
@@ -7210,6 +7276,14 @@ L:       linux-xtensa@linux-xtensa.org
 S:     Maintained
 F:     arch/xtensa/
 
+THERMAL
+M:      Zhang Rui <rui.zhang@intel.com>
+L:      linux-pm@vger.kernel.org
+T:      git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
+S:      Supported
+F:      drivers/thermal/
+F:      include/linux/thermal.h
+
 THINKPAD ACPI EXTRAS DRIVER
 M:     Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br>
 L:     ibm-acpi-devel@lists.sourceforge.net
@@ -7887,13 +7961,6 @@ M:       Roger Luethi <rl@hellgate.ch>
 S:     Maintained
 F:     drivers/net/ethernet/via/via-rhine.c
 
-VIAPRO SMBUS DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
-L:     linux-i2c@vger.kernel.org
-S:     Maintained
-F:     Documentation/i2c/busses/i2c-viapro
-F:     drivers/i2c/busses/i2c-viapro.c
-
 VIA SD/MMC CARD CONTROLLER DRIVER
 M:     Bruce Chang <brucechang@via.com.tw>
 M:     Harald Welte <HaraldWelte@viatech.com>
@@ -8148,7 +8215,7 @@ F:        drivers/platform/x86
 
 X86 MCE INFRASTRUCTURE
 M:     Tony Luck <tony.luck@intel.com>
-M:     Borislav Petkov <bp@amd64.org>
+M:     Borislav Petkov <bp@alien8.de>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     arch/x86/kernel/cpu/mcheck/*
index a1ccf22..3d2fc46 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
 NAME = Terrified Chipmunk
 
 # *DOCUMENTATION*
index 1e6956a..14db93e 100644 (file)
@@ -445,7 +445,7 @@ struct procfs_args {
  * unhappy with OSF UFS. [CHECKME]
  */
 static int
-osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
+osf_ufs_mount(const char *dirname, struct ufs_args __user *args, int flags)
 {
        int retval;
        struct cdfs_args tmp;
@@ -465,7 +465,7 @@ osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
 }
 
 static int
-osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
+osf_cdfs_mount(const char *dirname, struct cdfs_args __user *args, int flags)
 {
        int retval;
        struct cdfs_args tmp;
@@ -485,7 +485,7 @@ osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
 }
 
 static int
-osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags)
+osf_procfs_mount(const char *dirname, struct procfs_args __user *args, int flags)
 {
        struct procfs_args tmp;
 
index f2aa09e..9137df5 100644 (file)
@@ -33,7 +33,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y)
 
 $(obj)/xipImage: vmlinux FORCE
        $(call if_changed,objcopy)
-       $(kecho) '  Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
+       @$(kecho) '  Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
 
 $(obj)/Image $(obj)/zImage: FORCE
        @echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)'
@@ -48,14 +48,14 @@ $(obj)/xipImage: FORCE
 
 $(obj)/Image: vmlinux FORCE
        $(call if_changed,objcopy)
-       $(kecho) '  Kernel: $@ is ready'
+       @$(kecho) '  Kernel: $@ is ready'
 
 $(obj)/compressed/vmlinux: $(obj)/Image FORCE
        $(Q)$(MAKE) $(build)=$(obj)/compressed $@
 
 $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
        $(call if_changed,objcopy)
-       $(kecho) '  Kernel: $@ is ready'
+       @$(kecho) '  Kernel: $@ is ready'
 
 endif
 
@@ -90,7 +90,7 @@ fi
 $(obj)/uImage: $(obj)/zImage FORCE
        @$(check_for_multiple_loadaddr)
        $(call if_changed,uimage)
-       $(kecho) '  Image $@ is ready'
+       @$(kecho) '  Image $@ is ready'
 
 $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
        $(Q)$(MAKE) $(build)=$(obj)/bootp $@
@@ -98,7 +98,7 @@ $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
 
 $(obj)/bootpImage: $(obj)/bootp/bootp FORCE
        $(call if_changed,objcopy)
-       $(kecho) '  Kernel: $@ is ready'
+       @$(kecho) '  Kernel: $@ is ready'
 
 PHONY += initrd FORCE
 initrd:
index b1497c7..df7f227 100644 (file)
@@ -73,8 +73,8 @@
 
        pinmux: pinmux {
                compatible = "nvidia,tegra30-pinmux";
-               reg = <0x70000868 0xd0    /* Pad control registers */
-                      0x70003000 0x3e0>; /* Mux registers */
+               reg = <0x70000868 0xd4    /* Pad control registers */
+                      0x70003000 0x3e4>; /* Mux registers */
        };
 
        serial@70006000 {
index 35c1ed8..42f042e 100644 (file)
@@ -64,7 +64,7 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 static inline void __raw_writew(u16 val, volatile void __iomem *addr)
 {
        asm volatile("strh %1, %0"
-                    : "+Qo" (*(volatile u16 __force *)addr)
+                    : "+Q" (*(volatile u16 __force *)addr)
                     : "r" (val));
 }
 
@@ -72,7 +72,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
 {
        u16 val;
        asm volatile("ldrh %1, %0"
-                    : "+Qo" (*(volatile u16 __force *)addr),
+                    : "+Q" (*(volatile u16 __force *)addr),
                       "=r" (val));
        return val;
 }
index 05b8e82..e3f7572 100644 (file)
@@ -10,7 +10,5 @@
 
 extern void sched_clock_postinit(void);
 extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
-extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
-               unsigned long rate);
 
 #endif
index 6a6f1e4..301c1db 100644 (file)
@@ -27,9 +27,9 @@
 #if __LINUX_ARM_ARCH__ <= 6
        ldr     \tmp, =elf_hwcap                    @ may not have MVFR regs
        ldr     \tmp, [\tmp, #0]
-       tst     \tmp, #HWCAP_VFPv3D16
-       ldceql  p11, cr0, [\base],#32*4             @ FLDMIAD \base!, {d16-d31}
-       addne   \base, \base, #32*4                 @ step over unused register space
+       tst     \tmp, #HWCAP_VFPD32
+       ldcnel  p11, cr0, [\base],#32*4             @ FLDMIAD \base!, {d16-d31}
+       addeq   \base, \base, #32*4                 @ step over unused register space
 #else
        VFPFMRX \tmp, MVFR0                         @ Media and VFP Feature Register 0
        and     \tmp, \tmp, #MVFR0_A_SIMD_MASK      @ A_SIMD field
@@ -51,9 +51,9 @@
 #if __LINUX_ARM_ARCH__ <= 6
        ldr     \tmp, =elf_hwcap                    @ may not have MVFR regs
        ldr     \tmp, [\tmp, #0]
-       tst     \tmp, #HWCAP_VFPv3D16
-       stceql  p11, cr0, [\base],#32*4             @ FSTMIAD \base!, {d16-d31}
-       addne   \base, \base, #32*4                 @ step over unused register space
+       tst     \tmp, #HWCAP_VFPD32
+       stcnel  p11, cr0, [\base],#32*4             @ FSTMIAD \base!, {d16-d31}
+       addeq   \base, \base, #32*4                 @ step over unused register space
 #else
        VFPFMRX \tmp, MVFR0                         @ Media and VFP Feature Register 0
        and     \tmp, \tmp, #MVFR0_A_SIMD_MASK      @ A_SIMD field
index f254f65..3688fd1 100644 (file)
 #define HWCAP_THUMBEE  (1 << 11)
 #define HWCAP_NEON     (1 << 12)
 #define HWCAP_VFPv3    (1 << 13)
-#define HWCAP_VFPv3D16 (1 << 14)
+#define HWCAP_VFPv3D16 (1 << 14)       /* also set for VFPv4-D16 */
 #define HWCAP_TLS      (1 << 15)
 #define HWCAP_VFPv4    (1 << 16)
 #define HWCAP_IDIVA    (1 << 17)
 #define HWCAP_IDIVT    (1 << 18)
+#define HWCAP_VFPD32   (1 << 19)       /* set if VFP has 32 regs (not 16) */
 #define HWCAP_IDIV     (HWCAP_IDIVA | HWCAP_IDIVT)
 
 
index e21bac2..fc6692e 100644 (file)
@@ -107,13 +107,6 @@ static void sched_clock_poll(unsigned long wrap_ticks)
        update_sched_clock();
 }
 
-void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
-               unsigned long rate)
-{
-       setup_sched_clock(read, bits, rate);
-       cd.needs_suspend = true;
-}
-
 void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
        unsigned long r, w;
@@ -189,18 +182,15 @@ void __init sched_clock_postinit(void)
 static int sched_clock_suspend(void)
 {
        sched_clock_poll(sched_clock_timer.data);
-       if (cd.needs_suspend)
-               cd.suspended = true;
+       cd.suspended = true;
        return 0;
 }
 
 static void sched_clock_resume(void)
 {
-       if (cd.needs_suspend) {
-               cd.epoch_cyc = read_sched_clock();
-               cd.epoch_cyc_copy = cd.epoch_cyc;
-               cd.suspended = false;
-       }
+       cd.epoch_cyc = read_sched_clock();
+       cd.epoch_cyc_copy = cd.epoch_cyc;
+       cd.suspended = false;
 }
 
 static struct syscore_ops sched_clock_ops = {
index 1e122bc..3cee0e6 100644 (file)
@@ -68,7 +68,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
 
        /* Enable overcurrent notification */
        for (i = 0; i < data->ports; i++) {
-               if (data->overcurrent_pin[i])
+               if (gpio_is_valid(data->overcurrent_pin[i]))
                        at91_set_gpio_input(data->overcurrent_pin[i], 1);
        }
 
index aa1e587..414bd85 100644 (file)
@@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
 
        /* Enable overcurrent notification */
        for (i = 0; i < data->ports; i++) {
-               if (data->overcurrent_pin[i])
+               if (gpio_is_valid(data->overcurrent_pin[i]))
                        at91_set_gpio_input(data->overcurrent_pin[i], 1);
        }
 
index b948769..cd604aa 100644 (file)
@@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
 
        /* Enable overcurrent notification */
        for (i = 0; i < data->ports; i++) {
-               if (data->overcurrent_pin[i])
+               if (gpio_is_valid(data->overcurrent_pin[i]))
                        at91_set_gpio_input(data->overcurrent_pin[i], 1);
        }
 
index cb85da2..9c61e59 100644 (file)
@@ -78,7 +78,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
 
        /* Enable overcurrent notification */
        for (i = 0; i < data->ports; i++) {
-               if (data->overcurrent_pin[i])
+               if (gpio_is_valid(data->overcurrent_pin[i]))
                        at91_set_gpio_input(data->overcurrent_pin[i], 1);
        }
 
index b159607..fcd233c 100644 (file)
@@ -1841,8 +1841,8 @@ static struct resource sha_resources[] = {
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
-               .start  = AT91SAM9G45_ID_AESTDESSHA,
-               .end    = AT91SAM9G45_ID_AESTDESSHA,
+               .start  = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
+               .end    = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
                .flags  = IORESOURCE_IRQ,
        },
 };
@@ -1874,8 +1874,8 @@ static struct resource tdes_resources[] = {
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
-               .start  = AT91SAM9G45_ID_AESTDESSHA,
-               .end    = AT91SAM9G45_ID_AESTDESSHA,
+               .start  = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
+               .end    = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
                .flags  = IORESOURCE_IRQ,
        },
 };
@@ -1910,8 +1910,8 @@ static struct resource aes_resources[] = {
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
-               .start  = AT91SAM9G45_ID_AESTDESSHA,
-               .end    = AT91SAM9G45_ID_AESTDESSHA,
+               .start  = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
+               .end    = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
                .flags  = IORESOURCE_IRQ,
        },
 };
index cd0c8b1..14e9947 100644 (file)
@@ -713,8 +713,7 @@ static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type,
                break;
        case VPBE_ENC_CUSTOM_TIMINGS:
                if (pclock <= 27000000) {
-                       v |= DM644X_VPSS_MUXSEL_PLL2_MODE |
-                            DM644X_VPSS_DACCLKEN;
+                       v |= DM644X_VPSS_DACCLKEN;
                        writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL));
                } else {
                        /*
index 21d568b..87e07d6 100644 (file)
@@ -275,6 +275,9 @@ static int __init exynos_dma_init(void)
                exynos_pdma1_pdata.nr_valid_peri =
                        ARRAY_SIZE(exynos4210_pdma1_peri);
                exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
+
+               if (samsung_rev() == EXYNOS4210_REV_0)
+                       exynos_mdma1_device.res.start = EXYNOS4_PA_S_MDMA1;
        } else if (soc_is_exynos4212() || soc_is_exynos4412()) {
                exynos_pdma0_pdata.nr_valid_peri =
                        ARRAY_SIZE(exynos4212_pdma0_peri);
index 8480849..ed4da45 100644 (file)
@@ -90,6 +90,7 @@
 
 #define EXYNOS4_PA_MDMA0               0x10810000
 #define EXYNOS4_PA_MDMA1               0x12850000
+#define EXYNOS4_PA_S_MDMA1             0x12840000
 #define EXYNOS4_PA_PDMA0               0x12680000
 #define EXYNOS4_PA_PDMA1               0x12690000
 #define EXYNOS5_PA_MDMA0               0x10800000
index 82c2723..86e37cd 100644 (file)
@@ -28,6 +28,7 @@ void highbank_restart(char mode, const char *cmd)
                hignbank_set_pwr_soft_reset();
 
        scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
-       cpu_do_idle();
+       while (1)
+               cpu_do_idle();
 }
 
index 3c1b8ff..cc49c7a 100644 (file)
@@ -112,7 +112,7 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
 
        clk = clk_register(dev, &gate->hw);
        if (IS_ERR(clk))
-               kfree(clk);
+               kfree(gate);
 
        return clk;
 }
index 412c583..576af74 100644 (file)
@@ -30,7 +30,7 @@
 #define MX25_H1_SIC_SHIFT      21
 #define MX25_H1_SIC_MASK       (0x3 << MX25_H1_SIC_SHIFT)
 #define MX25_H1_PP_BIT         (1 << 18)
-#define MX25_H1_PM_BIT         (1 << 8)
+#define MX25_H1_PM_BIT         (1 << 16)
 #define MX25_H1_IPPUE_UP_BIT   (1 << 7)
 #define MX25_H1_IPPUE_DOWN_BIT (1 << 6)
 #define MX25_H1_TLL_BIT                (1 << 5)
index 779e16e..2933978 100644 (file)
@@ -30,7 +30,7 @@
 #define MX35_H1_SIC_SHIFT      21
 #define MX35_H1_SIC_MASK       (0x3 << MX35_H1_SIC_SHIFT)
 #define MX35_H1_PP_BIT         (1 << 18)
-#define MX35_H1_PM_BIT         (1 << 8)
+#define MX35_H1_PM_BIT         (1 << 16)
 #define MX35_H1_IPPUE_UP_BIT   (1 << 7)
 #define MX35_H1_IPPUE_DOWN_BIT (1 << 6)
 #define MX35_H1_TLL_BIT                (1 << 5)
index 48d5e41..3785906 100644 (file)
@@ -580,6 +580,11 @@ static void __init igep_wlan_bt_init(void)
        } else
                return;
 
+       /* Make sure that the GPIO pins are muxed correctly */
+       omap_mux_init_gpio(igep_wlan_bt_gpios[0].gpio, OMAP_PIN_OUTPUT);
+       omap_mux_init_gpio(igep_wlan_bt_gpios[1].gpio, OMAP_PIN_OUTPUT);
+       omap_mux_init_gpio(igep_wlan_bt_gpios[2].gpio, OMAP_PIN_OUTPUT);
+
        err = gpio_request_array(igep_wlan_bt_gpios,
                                 ARRAY_SIZE(igep_wlan_bt_gpios));
        if (err) {
index b56d06b..95192a0 100644 (file)
@@ -359,7 +359,7 @@ static struct clockdomain iss_44xx_clkdm = {
        .clkdm_offs       = OMAP4430_CM2_CAM_CAM_CDOFFS,
        .wkdep_srcs       = iss_wkup_sleep_deps,
        .sleepdep_srcs    = iss_wkup_sleep_deps,
-       .flags            = CLKDM_CAN_HWSUP_SWSUP,
+       .flags            = CLKDM_CAN_SWSUP,
 };
 
 static struct clockdomain l3_dss_44xx_clkdm = {
index 48daac2..84551f2 100644 (file)
@@ -64,30 +64,36 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
        struct spi_board_info *spi_bi = &ads7846_spi_board_info;
        int err;
 
-       err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
-       if (err) {
-               pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
-               return;
-       }
+       /*
+        * If a board defines get_pendown_state() function, request the pendown
+        * GPIO and set the GPIO debounce time.
+        * If a board does not define the get_pendown_state() function, then
+        * the ads7846 driver will setup the pendown GPIO itself.
+        */
+       if (board_pdata && board_pdata->get_pendown_state) {
+               err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
+               if (err) {
+                       pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
+                       return;
+               }
 
-       if (gpio_debounce)
-               gpio_set_debounce(gpio_pendown, gpio_debounce);
+               if (gpio_debounce)
+                       gpio_set_debounce(gpio_pendown, gpio_debounce);
+
+               gpio_export(gpio_pendown, 0);
+       }
 
        spi_bi->bus_num = bus_num;
        spi_bi->irq     = gpio_to_irq(gpio_pendown);
 
+       ads7846_config.gpio_pendown = gpio_pendown;
+
        if (board_pdata) {
                board_pdata->gpio_pendown = gpio_pendown;
+               board_pdata->gpio_pendown_debounce = gpio_debounce;
                spi_bi->platform_data = board_pdata;
-               if (board_pdata->get_pendown_state)
-                       gpio_export(gpio_pendown, 0);
-       } else {
-               ads7846_config.gpio_pendown = gpio_pendown;
        }
 
-       if (!board_pdata || (board_pdata && !board_pdata->get_pendown_state))
-               gpio_free(gpio_pendown);
-
        spi_register_board_info(&ads7846_spi_board_info, 1);
 }
 #else
index cba60e0..c72b5a7 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/of.h>
 #include <linux/pinctrl/machine.h>
 #include <linux/platform_data/omap4-keypad.h>
+#include <linux/platform_data/omap_ocp2scp.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -613,6 +614,83 @@ static void omap_init_vout(void)
 static inline void omap_init_vout(void) {}
 #endif
 
+#if defined(CONFIG_OMAP_OCP2SCP) || defined(CONFIG_OMAP_OCP2SCP_MODULE)
+static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev)
+{
+       int cnt = 0;
+
+       while (ocp2scp_dev->drv_name != NULL) {
+               cnt++;
+               ocp2scp_dev++;
+       }
+
+       return cnt;
+}
+
+static void omap_init_ocp2scp(void)
+{
+       struct omap_hwmod       *oh;
+       struct platform_device  *pdev;
+       int                     bus_id = -1, dev_cnt = 0, i;
+       struct omap_ocp2scp_dev *ocp2scp_dev;
+       const char              *oh_name, *name;
+       struct omap_ocp2scp_platform_data *pdata;
+
+       if (!cpu_is_omap44xx())
+               return;
+
+       oh_name = "ocp2scp_usb_phy";
+       name    = "omap-ocp2scp";
+
+       oh = omap_hwmod_lookup(oh_name);
+       if (!oh) {
+               pr_err("%s: could not find omap_hwmod for %s\n", __func__,
+                                                               oh_name);
+               return;
+       }
+
+       pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+       if (!pdata) {
+               pr_err("%s: No memory for ocp2scp pdata\n", __func__);
+               return;
+       }
+
+       ocp2scp_dev = oh->dev_attr;
+       dev_cnt = count_ocp2scp_devices(ocp2scp_dev);
+
+       if (!dev_cnt) {
+               pr_err("%s: No devices connected to ocp2scp\n", __func__);
+               kfree(pdata);
+               return;
+       }
+
+       pdata->devices = kzalloc(sizeof(struct omap_ocp2scp_dev *)
+                                       * dev_cnt, GFP_KERNEL);
+       if (!pdata->devices) {
+               pr_err("%s: No memory for ocp2scp pdata devices\n", __func__);
+               kfree(pdata);
+               return;
+       }
+
+       for (i = 0; i < dev_cnt; i++, ocp2scp_dev++)
+               pdata->devices[i] = ocp2scp_dev;
+
+       pdata->dev_cnt  = dev_cnt;
+
+       pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata), NULL,
+                                                               0, false);
+       if (IS_ERR(pdev)) {
+               pr_err("Could not build omap_device for %s %s\n",
+                                               name, oh_name);
+               kfree(pdata->devices);
+               kfree(pdata);
+               return;
+       }
+}
+#else
+static inline void omap_init_ocp2scp(void) { }
+#endif
+
 /*-------------------------------------------------------------------------*/
 
 static int __init omap2_init_devices(void)
@@ -640,6 +718,7 @@ static int __init omap2_init_devices(void)
        omap_init_sham();
        omap_init_aes();
        omap_init_vout();
+       omap_init_ocp2scp();
 
        return 0;
 }
index b969ab1..87cc6d0 100644 (file)
@@ -422,6 +422,38 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
 }
 
 /**
+ * _wait_softreset_complete - wait for an OCP softreset to complete
+ * @oh: struct omap_hwmod * to wait on
+ *
+ * Wait until the IP block represented by @oh reports that its OCP
+ * softreset is complete.  This can be triggered by software (see
+ * _ocp_softreset()) or by hardware upon returning from off-mode (one
+ * example is HSMMC).  Waits for up to MAX_MODULE_SOFTRESET_WAIT
+ * microseconds.  Returns the number of microseconds waited.
+ */
+static int _wait_softreset_complete(struct omap_hwmod *oh)
+{
+       struct omap_hwmod_class_sysconfig *sysc;
+       u32 softrst_mask;
+       int c = 0;
+
+       sysc = oh->class->sysc;
+
+       if (sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
+               omap_test_timeout((omap_hwmod_read(oh, sysc->syss_offs)
+                                  & SYSS_RESETDONE_MASK),
+                                 MAX_MODULE_SOFTRESET_WAIT, c);
+       else if (sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
+               softrst_mask = (0x1 << sysc->sysc_fields->srst_shift);
+               omap_test_timeout(!(omap_hwmod_read(oh, sysc->sysc_offs)
+                                   & softrst_mask),
+                                 MAX_MODULE_SOFTRESET_WAIT, c);
+       }
+
+       return c;
+}
+
+/**
  * _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v
  * @oh: struct omap_hwmod *
  *
@@ -1282,6 +1314,18 @@ static void _enable_sysc(struct omap_hwmod *oh)
        if (!oh->class->sysc)
                return;
 
+       /*
+        * Wait until reset has completed, this is needed as the IP
+        * block is reset automatically by hardware in some cases
+        * (off-mode for example), and the drivers require the
+        * IP to be ready when they access it
+        */
+       if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+               _enable_optional_clocks(oh);
+       _wait_softreset_complete(oh);
+       if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+               _disable_optional_clocks(oh);
+
        v = oh->_sysc_cache;
        sf = oh->class->sysc->sysc_flags;
 
@@ -1804,7 +1848,7 @@ static int _am33xx_disable_module(struct omap_hwmod *oh)
  */
 static int _ocp_softreset(struct omap_hwmod *oh)
 {
-       u32 v, softrst_mask;
+       u32 v;
        int c = 0;
        int ret = 0;
 
@@ -1834,19 +1878,7 @@ static int _ocp_softreset(struct omap_hwmod *oh)
        if (oh->class->sysc->srst_udelay)
                udelay(oh->class->sysc->srst_udelay);
 
-       if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
-               omap_test_timeout((omap_hwmod_read(oh,
-                                                   oh->class->sysc->syss_offs)
-                                  & SYSS_RESETDONE_MASK),
-                                 MAX_MODULE_SOFTRESET_WAIT, c);
-       else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
-               softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
-               omap_test_timeout(!(omap_hwmod_read(oh,
-                                                    oh->class->sysc->sysc_offs)
-                                  & softrst_mask),
-                                 MAX_MODULE_SOFTRESET_WAIT, c);
-       }
-
+       c = _wait_softreset_complete(oh);
        if (c == MAX_MODULE_SOFTRESET_WAIT)
                pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
                           oh->name, MAX_MODULE_SOFTRESET_WAIT);
@@ -2352,6 +2384,9 @@ static int __init _setup_reset(struct omap_hwmod *oh)
        if (oh->_state != _HWMOD_STATE_INITIALIZED)
                return -EINVAL;
 
+       if (oh->flags & HWMOD_EXT_OPT_MAIN_CLK)
+               return -EPERM;
+
        if (oh->rst_lines_cnt == 0) {
                r = _enable(oh);
                if (r) {
index 652d028..0b1249e 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/platform_data/gpio-omap.h>
 #include <linux/power/smartreflex.h>
+#include <linux/platform_data/omap_ocp2scp.h>
 
 #include <plat/omap_hwmod.h>
 #include <plat/i2c.h>
@@ -2125,6 +2126,14 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {
        .name           = "mcpdm",
        .class          = &omap44xx_mcpdm_hwmod_class,
        .clkdm_name     = "abe_clkdm",
+       /*
+        * It's suspected that the McPDM requires an off-chip main
+        * functional clock, controlled via I2C.  This IP block is
+        * currently reset very early during boot, before I2C is
+        * available, so it doesn't seem that we have any choice in
+        * the kernel other than to avoid resetting it.
+        */
+       .flags          = HWMOD_EXT_OPT_MAIN_CLK,
        .mpu_irqs       = omap44xx_mcpdm_irqs,
        .sdma_reqs      = omap44xx_mcpdm_sdma_reqs,
        .main_clk       = "mcpdm_fck",
@@ -2681,6 +2690,32 @@ static struct omap_hwmod_class omap44xx_ocp2scp_hwmod_class = {
        .sysc   = &omap44xx_ocp2scp_sysc,
 };
 
+/* ocp2scp dev_attr */
+static struct resource omap44xx_usb_phy_and_pll_addrs[] = {
+       {
+               .name           = "usb_phy",
+               .start          = 0x4a0ad080,
+               .end            = 0x4a0ae000,
+               .flags          = IORESOURCE_MEM,
+       },
+       {
+               /* XXX: Remove this once control module driver is in place */
+               .name           = "ctrl_dev",
+               .start          = 0x4a002300,
+               .end            = 0x4a002303,
+               .flags          = IORESOURCE_MEM,
+       },
+       { }
+};
+
+static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {
+       {
+               .drv_name       = "omap-usb2",
+               .res            = omap44xx_usb_phy_and_pll_addrs,
+       },
+       { }
+};
+
 /* ocp2scp_usb_phy */
 static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
        .name           = "ocp2scp_usb_phy",
@@ -2694,6 +2729,7 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
                        .modulemode   = MODULEMODE_HWCTRL,
                },
        },
+       .dev_attr       = ocp2scp_dev_attr,
 };
 
 /*
index 635e109..a256135 100644 (file)
@@ -73,6 +73,7 @@ void __init omap4_pmic_init(const char *pmic_type,
 {
        /* PMIC part*/
        omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
+       omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT);
        omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data);
 
        /* Register additional devices on i2c1 bus if needed */
@@ -366,7 +367,7 @@ static struct regulator_init_data omap4_clk32kg_idata = {
 };
 
 static struct regulator_consumer_supply omap4_vdd1_supply[] = {
-       REGULATOR_SUPPLY("vcc", "mpu.0"),
+       REGULATOR_SUPPLY("vcc", "cpu0"),
 };
 
 static struct regulator_consumer_supply omap4_vdd2_supply[] = {
index 880249b..75878c3 100644 (file)
@@ -264,7 +264,7 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
 
        if (initialized) {
                if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
-                       pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).",
+                       pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).\n",
                                __func__, voltdm->name, i2c_high_speed);
                return;
        }
index 5ecbd17..e2c6391 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/mfd/asic3.h>
 #include <linux/mtd/physmap.h>
 #include <linux/pda_power.h>
+#include <linux/pwm.h>
 #include <linux/pwm_backlight.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/gpio-regulator.h>
@@ -556,7 +557,7 @@ static struct platform_device hx4700_lcd = {
  */
 
 static struct platform_pwm_backlight_data backlight_data = {
-       .pwm_id         = 1,
+       .pwm_id         = -1,   /* Superseded by pwm_lookup */
        .max_brightness = 200,
        .dft_brightness = 100,
        .pwm_period_ns  = 30923,
@@ -571,6 +572,10 @@ static struct platform_device backlight = {
        },
 };
 
+static struct pwm_lookup hx4700_pwm_lookup[] = {
+       PWM_LOOKUP("pxa27x-pwm.1", 0, "pwm-backlight", NULL),
+};
+
 /*
  * USB "Transceiver"
  */
@@ -872,6 +877,7 @@ static void __init hx4700_init(void)
        pxa_set_stuart_info(NULL);
 
        platform_add_devices(devices, ARRAY_SIZE(devices));
+       pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup));
 
        pxa_set_ficp_info(&ficp_info);
        pxa27x_set_i2c_power_info(NULL);
index 438f02f..842596d 100644 (file)
@@ -86,10 +86,7 @@ static void spitz_discharge1(int on)
        gpio_set_value(SPITZ_GPIO_LED_GREEN, on);
 }
 
-static unsigned long gpio18_config[] = {
-       GPIO18_RDY,
-       GPIO18_GPIO,
-};
+static unsigned long gpio18_config = GPIO18_GPIO;
 
 static void spitz_presuspend(void)
 {
@@ -112,7 +109,7 @@ static void spitz_presuspend(void)
        PGSR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
        PGSR2 |= GPIO_bit(SPITZ_GPIO_KEY_STROBE0);
 
-       pxa2xx_mfp_config(&gpio18_config[0], 1);
+       pxa2xx_mfp_config(&gpio18_config, 1);
        gpio_request_one(18, GPIOF_OUT_INIT_HIGH, "Unknown");
        gpio_free(18);
 
@@ -131,7 +128,6 @@ static void spitz_presuspend(void)
 
 static void spitz_postsuspend(void)
 {
-       pxa2xx_mfp_config(&gpio18_config[1], 1);
 }
 
 static int spitz_should_wakeup(unsigned int resume_on_alarm)
index 023f443..b820eda 100644 (file)
@@ -745,7 +745,7 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
 static int
 do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
-       union offset_union offset;
+       union offset_union uninitialized_var(offset);
        unsigned long instr = 0, instrptr;
        int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
        unsigned int type;
index 58bc3e4..f076f20 100644 (file)
@@ -1036,7 +1036,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
        spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+                                         gfp_t gfp, struct dma_attrs *attrs)
 {
        struct page **pages;
        int count = size >> PAGE_SHIFT;
@@ -1050,6 +1051,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
        if (!pages)
                return NULL;
 
+       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
+       {
+               unsigned long order = get_order(size);
+               struct page *page;
+
+               page = dma_alloc_from_contiguous(dev, count, order);
+               if (!page)
+                       goto error;
+
+               __dma_clear_buffer(page, size);
+
+               for (i = 0; i < count; i++)
+                       pages[i] = page + i;
+
+               return pages;
+       }
+
        while (count) {
                int j, order = __fls(count);
 
@@ -1083,14 +1101,21 @@ error:
        return NULL;
 }
 
-static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+                              size_t size, struct dma_attrs *attrs)
 {
        int count = size >> PAGE_SHIFT;
        int array_size = count * sizeof(struct page *);
        int i;
-       for (i = 0; i < count; i++)
-               if (pages[i])
-                       __free_pages(pages[i], 0);
+
+       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+               dma_release_from_contiguous(dev, pages[0], count);
+       } else {
+               for (i = 0; i < count; i++)
+                       if (pages[i])
+                               __free_pages(pages[i], 0);
+       }
+
        if (array_size <= PAGE_SIZE)
                kfree(pages);
        else
@@ -1252,7 +1277,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
        if (gfp & GFP_ATOMIC)
                return __iommu_alloc_atomic(dev, size, handle);
 
-       pages = __iommu_alloc_buffer(dev, size, gfp);
+       pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
        if (!pages)
                return NULL;
 
@@ -1273,7 +1298,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 err_mapping:
        __iommu_remove_mapping(dev, *handle, size);
 err_buffer:
-       __iommu_free_buffer(dev, pages, size);
+       __iommu_free_buffer(dev, pages, size, attrs);
        return NULL;
 }
 
@@ -1329,7 +1354,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
        }
 
        __iommu_remove_mapping(dev, handle, size);
-       __iommu_free_buffer(dev, pages, size);
+       __iommu_free_buffer(dev, pages, size, attrs);
 }
 
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
index a5683a8..6013831 100644 (file)
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
+#include <linux/i2c-omap.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/clk.h>
 
 #include <mach/irqs.h>
 #include <plat/i2c.h>
+#include <plat/omap-pm.h>
 #include <plat/omap_device.h>
 
 #define OMAP_I2C_SIZE          0x3f
@@ -127,6 +129,16 @@ static inline int omap1_i2c_add_bus(int bus_id)
 
 
 #ifdef CONFIG_ARCH_OMAP2PLUS
+/*
+ * XXX This function is a temporary compatibility wrapper - only
+ * needed until the I2C driver can be converted to call
+ * omap_pm_set_max_dev_wakeup_lat() and handle a return code.
+ */
+static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t)
+{
+       omap_pm_set_max_mpu_wakeup_lat(dev, t);
+}
+
 static inline int omap2_i2c_add_bus(int bus_id)
 {
        int l;
@@ -158,6 +170,15 @@ static inline int omap2_i2c_add_bus(int bus_id)
        dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr;
        pdata->flags = dev_attr->flags;
 
+       /*
+        * When waiting for completion of a i2c transfer, we need to
+        * set a wake up latency constraint for the MPU. This is to
+        * ensure quick enough wakeup from idle, when transfer
+        * completes.
+        * Only omap3 has support for constraints
+        */
+       if (cpu_is_omap34xx())
+               pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
        pdev = omap_device_build(name, bus_id, oh, pdata,
                        sizeof(struct omap_i2c_bus_platform_data),
                        NULL, 0, 0);
index b3349f7..1db0294 100644 (file)
@@ -443,6 +443,11 @@ struct omap_hwmod_omap4_prcm {
  *     in order to complete the reset. Optional clocks will be disabled
  *     again after the reset.
  * HWMOD_16BIT_REG: Module has 16bit registers
+ * HWMOD_EXT_OPT_MAIN_CLK: The only main functional clock source for
+ *     this IP block comes from an off-chip source and is not always
+ *     enabled.  This prevents the hwmod code from being able to
+ *     enable and reset the IP block early.  XXX Eventually it should
+ *     be possible to query the clock framework for this information.
  */
 #define HWMOD_SWSUP_SIDLE                      (1 << 0)
 #define HWMOD_SWSUP_MSTANDBY                   (1 << 1)
@@ -453,6 +458,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_NO_IDLEST                                (1 << 6)
 #define HWMOD_CONTROL_OPT_CLKS_IN_RESET                (1 << 7)
 #define HWMOD_16BIT_REG                                (1 << 8)
+#define HWMOD_EXT_OPT_MAIN_CLK                 (1 << 9)
 
 /*
  * omap_hwmod._int_flags definitions
index cd60a81..32d05c8 100644 (file)
@@ -5,6 +5,6 @@
 #
 
 include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
-       $(kecho) '  Generating $@'
+       @$(kecho) '  Generating $@'
        @mkdir -p $(dir $@)
        $(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }
index c834b32..3b44e0d 100644 (file)
@@ -701,11 +701,14 @@ static int __init vfp_init(void)
                        elf_hwcap |= HWCAP_VFPv3;
 
                        /*
-                        * Check for VFPv3 D16. CPUs in this configuration
-                        * only have 16 x 64bit registers.
+                        * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
+                        * this configuration only have 16 x 64bit
+                        * registers.
                         */
                        if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
-                               elf_hwcap |= HWCAP_VFPv3D16;
+                               elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
+                       else
+                               elf_hwcap |= HWCAP_VFPD32;
                }
 #endif
                /*
index 59bcb96..f576092 100644 (file)
@@ -166,3 +166,14 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
        *pages = NULL;
 }
 EXPORT_SYMBOL_GPL(free_xenballooned_pages);
+
+/* In the hypervisor.S file. */
+EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
+EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
+EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
+EXPORT_SYMBOL_GPL(privcmd_call);
index ef54a59..15ac18a 100644 (file)
@@ -1,6 +1,7 @@
 config ARM64
        def_bool y
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+       select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
        select GENERIC_CLOCKEVENTS
        select GENERIC_HARDIRQS_NO_DEPRECATED
        select GENERIC_IOMAP
index cf28464..07fea29 100644 (file)
 #include <asm/user.h>
 
 typedef unsigned long elf_greg_t;
-typedef unsigned long elf_freg_t[3];
 
 #define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef struct user_fp elf_fpregset_t;
+typedef struct user_fpsimd_state elf_fpregset_t;
 
 #define EM_AARCH64             183
 
@@ -87,7 +85,6 @@ typedef struct user_fp elf_fpregset_t;
 #define R_AARCH64_MOVW_PREL_G2_NC      292
 #define R_AARCH64_MOVW_PREL_G3         293
 
-
 /*
  * These are used to set parameters in the core dumps.
  */
index b42fab9..c43b4ac 100644 (file)
@@ -25,9 +25,8 @@
  *  - FPSR and FPCR
  *  - 32 128-bit data registers
  *
- * Note that user_fp forms a prefix of this structure, which is relied
- * upon in the ptrace FP/SIMD accessors. struct user_fpsimd_state must
- * form a prefix of struct fpsimd_state.
+ * Note that user_fpsimd forms a prefix of this structure, which is
+ * relied upon in the ptrace FP/SIMD accessors.
  */
 struct fpsimd_state {
        union {
index 74a2a7d..d2f05a6 100644 (file)
@@ -114,7 +114,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
  *  I/O port access primitives.
  */
 #define IO_SPACE_LIMIT         0xffff
-#define PCI_IOBASE             ((void __iomem *)0xffffffbbfffe0000UL)
+#define PCI_IOBASE             ((void __iomem *)(MODULES_VADDR - SZ_2M))
 
 static inline u8 inb(unsigned long addr)
 {
@@ -222,12 +222,12 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
 extern void __iounmap(volatile void __iomem *addr);
 
 #define PROT_DEFAULT           (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
 
-#define ioremap(addr, size)            __ioremap((addr), (size), PROT_DEVICE_nGnRE)
-#define ioremap_nocache(addr, size)    __ioremap((addr), (size), PROT_DEVICE_nGnRE)
-#define ioremap_wc(addr, size)         __ioremap((addr), (size), PROT_NORMAL_NC)
+#define ioremap(addr, size)            __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_nocache(addr, size)    __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_wc(addr, size)         __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define iounmap                                __iounmap
 
 #define ARCH_HAS_IOREMAP_WC
index 0f3b458..75fd13d 100644 (file)
@@ -38,7 +38,8 @@
 #define PMD_SECT_S             (_AT(pmdval_t, 3) << 8)
 #define PMD_SECT_AF            (_AT(pmdval_t, 1) << 10)
 #define PMD_SECT_NG            (_AT(pmdval_t, 1) << 11)
-#define PMD_SECT_XN            (_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_PXN           (_AT(pmdval_t, 1) << 53)
+#define PMD_SECT_UXN           (_AT(pmdval_t, 1) << 54)
 
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@@ -57,7 +58,8 @@
 #define PTE_SHARED             (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
 #define PTE_AF                 (_AT(pteval_t, 1) << 10)        /* Access Flag */
 #define PTE_NG                 (_AT(pteval_t, 1) << 11)        /* nG */
-#define PTE_XN                 (_AT(pteval_t, 1) << 54)        /* XN */
+#define PTE_PXN                        (_AT(pteval_t, 1) << 53)        /* Privileged XN */
+#define PTE_UXN                        (_AT(pteval_t, 1) << 54)        /* User XN */
 
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
index 8960239..14aba2d 100644 (file)
@@ -62,23 +62,23 @@ extern pgprot_t pgprot_default;
 
 #define _MOD_PROT(p, b)        __pgprot(pgprot_val(p) | (b))
 
-#define PAGE_NONE              _MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_SHARED            _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN)
-#define PAGE_SHARED_EXEC       _MOD_PROT(pgprot_default, PTE_USER | PTE_NG)
-#define PAGE_COPY              _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_COPY_EXEC         _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_READONLY          _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC     _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_KERNEL            _MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC       _MOD_PROT(pgprot_default, PTE_DIRTY)
-
-#define __PAGE_NONE            __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_SHARED          __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN)
-#define __PAGE_SHARED_EXEC     __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG)
-#define __PAGE_COPY            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
-#define __PAGE_READONLY                __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC   __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
+#define PAGE_NONE              _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_SHARED            _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED_EXEC       _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_COPY              _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_COPY_EXEC         _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_READONLY          _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_READONLY_EXEC     _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_KERNEL            _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
+#define PAGE_KERNEL_EXEC       _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
+
+#define __PAGE_NONE            __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_SHARED          __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_SHARED_EXEC     __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define __PAGE_COPY            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_COPY_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define __PAGE_READONLY                __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_READONLY_EXEC   __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
 
 #endif /* __ASSEMBLY__ */
 
@@ -130,10 +130,10 @@ extern struct page *empty_zero_page;
 #define pte_young(pte)         (pte_val(pte) & PTE_AF)
 #define pte_special(pte)       (pte_val(pte) & PTE_SPECIAL)
 #define pte_write(pte)         (!(pte_val(pte) & PTE_RDONLY))
-#define pte_exec(pte)          (!(pte_val(pte) & PTE_XN))
+#define pte_exec(pte)          (!(pte_val(pte) & PTE_UXN))
 
 #define pte_present_exec_user(pte) \
-       ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \
+       ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
         (PTE_VALID | PTE_USER))
 
 #define PTE_BIT_FUNC(fn,op) \
@@ -262,7 +262,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-       const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY;
+       const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
 }
index 5d81004..77f696c 100644 (file)
@@ -43,6 +43,8 @@
 #else
 #define STACK_TOP              STACK_TOP_MAX
 #endif /* CONFIG_COMPAT */
+
+#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK
 #endif /* __KERNEL__ */
 
 struct debug_info {
index 63f853f..68aff28 100644 (file)
@@ -14,7 +14,6 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #ifdef CONFIG_COMPAT
-#define __ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 #define __ARCH_WANT_COMPAT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_PAUSE
index ecbf2d8..c76c724 100644 (file)
@@ -613,17 +613,11 @@ enum armv8_pmuv3_perf_types {
        ARMV8_PMUV3_PERFCTR_BUS_ACCESS                          = 0x19,
        ARMV8_PMUV3_PERFCTR_MEM_ERROR                           = 0x1A,
        ARMV8_PMUV3_PERFCTR_BUS_CYCLES                          = 0x1D,
-
-       /*
-        * This isn't an architected event.
-        * We detect this event number and use the cycle counter instead.
-        */
-       ARMV8_PMUV3_PERFCTR_CPU_CYCLES                          = 0xFF,
 };
 
 /* PMUv3 HW events mapping. */
 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]              = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_CPU_CYCLES]              = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
@@ -1106,7 +1100,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
        unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;
 
        /* Always place a cycle counter into the cycle counter. */
-       if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
+       if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
                if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;
 
index f22965e..e04cebd 100644 (file)
@@ -310,24 +310,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 }
 
 /*
- * Fill in the task's elfregs structure for a core dump.
- */
-int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
-{
-       elf_core_copy_regs(elfregs, task_pt_regs(t));
-       return 1;
-}
-
-/*
- * fill in the fpe structure for a core dump...
- */
-int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
-{
-       return 0;
-}
-EXPORT_SYMBOL(dump_fpu);
-
-/*
  * Shuffle the argument into the correct register before calling the
  * thread function.  x1 is the thread argument, x2 is the pointer to
  * the thread function, and x3 points to the exit function.
index 226b6bf..538300f 100644 (file)
@@ -211,8 +211,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
         * before we continue.
         */
        set_cpu_online(cpu, true);
-       while (!cpu_active(cpu))
-               cpu_relax();
+       complete(&cpu_running);
 
        /*
         * OK, it's off to the idle thread for us
index efbf7df..4cd2893 100644 (file)
@@ -80,7 +80,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 #ifdef CONFIG_ZONE_DMA32
        /* 4GB maximum for 32-bit only capable devices */
        max_dma32 = min(max, MAX_DMA32_PFN);
-       zone_size[ZONE_DMA32] = max_dma32 - min;
+       zone_size[ZONE_DMA32] = max(min, max_dma32) - min;
 #endif
        zone_size[ZONE_NORMAL] = max - max_dma32;
 
index c635028..05887a1 100644 (file)
@@ -2,7 +2,8 @@
 #define __ARCH_H8300_CACHE_H
 
 /* bytes per L1 cache line */
-#define        L1_CACHE_BYTES  4
+#define        L1_CACHE_SHIFT  2
+#define        L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)
 
 /* m68k-elf-gcc  2.95.2 doesn't like these */
 
index acd5b68..082e383 100644 (file)
@@ -637,7 +637,6 @@ mem_init (void)
 
        high_memory = __va(max_low_pfn * PAGE_SIZE);
 
-       reset_zone_present_pages();
        for_each_online_pgdat(pgdat)
                if (pgdat->bdata->node_bootmem_map)
                        totalram_pages += free_all_bootmem_node(pgdat);
index 67e489d..2df26b5 100644 (file)
@@ -41,7 +41,7 @@ struct k_sigaction {
 static inline void sigaddset(sigset_t *set, int _sig)
 {
        asm ("bfset %0{%1,#1}"
-               : "+od" (*set)
+               : "+o" (*set)
                : "id" ((_sig - 1) ^ 31)
                : "cc");
 }
@@ -49,7 +49,7 @@ static inline void sigaddset(sigset_t *set, int _sig)
 static inline void sigdelset(sigset_t *set, int _sig)
 {
        asm ("bfclr %0{%1,#1}"
-               : "+od" (*set)
+               : "+o" (*set)
                : "id" ((_sig - 1) ^ 31)
                : "cc");
 }
@@ -65,7 +65,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig)
        int ret;
        asm ("bfextu %1{%2,#1},%0"
                : "=d" (ret)
-               : "od" (*set), "id" ((_sig-1) ^ 31)
+               : "o" (*set), "id" ((_sig-1) ^ 31)
                : "cc");
        return ret;
 }
index d38246e..9f883bf 100644 (file)
@@ -30,6 +30,7 @@
  * measurement, and debugging facilities.
  */
 
+#include <linux/irqflags.h>
 #include <asm/octeon/cvmx.h>
 #include <asm/octeon/cvmx-l2c.h>
 #include <asm/octeon/cvmx-spinlock.h>
index 7cf80ca..f9f5307 100644 (file)
@@ -11,6 +11,7 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/irqflags.h>
 
 #include <asm/bcache.h>
 
index 82ad35c..46ac73a 100644 (file)
@@ -14,7 +14,6 @@
 #endif
 
 #include <linux/compiler.h>
-#include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/byteorder.h>             /* sigh ... */
 #define smp_mb__before_clear_bit()     smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()      smp_llsc_mb()
 
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit(unsigned long nr,
+                           volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+                                volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+                             volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+                              volatile unsigned long *addr);
+
+
 /*
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -57,7 +74,7 @@
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
        unsigned long temp;
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
                        : "=&r" (temp), "+m" (*m)
                        : "ir" (1UL << bit));
                } while (unlikely(!temp));
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               *a |= mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               __mips_set_bit(nr, addr);
 }
 
 /*
@@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
        unsigned long temp;
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
                        : "=&r" (temp), "+m" (*m)
                        : "ir" (~(1UL << bit)));
                } while (unlikely(!temp));
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               *a &= ~mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               __mips_clear_bit(nr, addr);
 }
 
 /*
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
  */
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
                        : "=&r" (temp), "+m" (*m)
                        : "ir" (1UL << bit));
                } while (unlikely(!temp));
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               *a ^= mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               __mips_change_bit(nr, addr);
 }
 
 /*
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 static inline int test_and_set_bit(unsigned long nr,
        volatile unsigned long *addr)
 {
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
        unsigned long res;
 
        smp_mb__before_llsc();
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
                } while (unlikely(!res));
 
                res = temp & (1UL << bit);
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               res = (mask & *a);
-               *a |= mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               res = __mips_test_and_set_bit(nr, addr);
 
        smp_llsc_mb();
 
@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
 static inline int test_and_set_bit_lock(unsigned long nr,
        volatile unsigned long *addr)
 {
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
        unsigned long res;
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
                } while (unlikely(!res));
 
                res = temp & (1UL << bit);
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               res = (mask & *a);
-               *a |= mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               res = __mips_test_and_set_bit_lock(nr, addr);
 
        smp_llsc_mb();
 
@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 static inline int test_and_clear_bit(unsigned long nr,
        volatile unsigned long *addr)
 {
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
        unsigned long res;
 
        smp_mb__before_llsc();
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
                } while (unlikely(!res));
 
                res = temp & (1UL << bit);
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               res = (mask & *a);
-               *a &= ~mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               res = __mips_test_and_clear_bit(nr, addr);
 
        smp_llsc_mb();
 
@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 static inline int test_and_change_bit(unsigned long nr,
        volatile unsigned long *addr)
 {
-       unsigned short bit = nr & SZLONG_MASK;
+       int bit = nr & SZLONG_MASK;
        unsigned long res;
 
        smp_mb__before_llsc();
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
                } while (unlikely(!res));
 
                res = temp & (1UL << bit);
-       } else {
-               volatile unsigned long *a = addr;
-               unsigned long mask;
-               unsigned long flags;
-
-               a += nr >> SZLONG_LOG;
-               mask = 1UL << bit;
-               raw_local_irq_save(flags);
-               res = (mask & *a);
-               *a ^= mask;
-               raw_local_irq_restore(flags);
-       }
+       } else
+               res = __mips_test_and_change_bit(nr, addr);
 
        smp_llsc_mb();
 
index 58277e0..3c5d146 100644 (file)
@@ -290,7 +290,7 @@ struct compat_shmid64_ds {
 
 static inline int is_compat_task(void)
 {
-       return test_thread_flag(TIF_32BIT);
+       return test_thread_flag(TIF_32BIT_ADDR);
 }
 
 #endif /* _ASM_COMPAT_H */
index 29d9c23..ff2e034 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/irqflags.h>
 
 #include <asm/addrspace.h>
 #include <asm/bug.h>
index 309cbcd..9f3384c 100644 (file)
 #include <linux/compiler.h>
 #include <asm/hazards.h>
 
-__asm__(
-       "       .macro  arch_local_irq_enable                           \n"
-       "       .set    push                                            \n"
-       "       .set    reorder                                         \n"
-       "       .set    noat                                            \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    $1, $2, 1       # SMTC - clear TCStatus.IXMT    \n"
-       "       ori     $1, 0x400                                       \n"
-       "       xori    $1, 0x400                                       \n"
-       "       mtc0    $1, $2, 1                                       \n"
-#elif defined(CONFIG_CPU_MIPSR2)
-       "       ei                                                      \n"
-#else
-       "       mfc0    $1,$12                                          \n"
-       "       ori     $1,0x1f                                         \n"
-       "       xori    $1,0x1e                                         \n"
-       "       mtc0    $1,$12                                          \n"
-#endif
-       "       irq_enable_hazard                                       \n"
-       "       .set    pop                                             \n"
-       "       .endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC kernel needs to do a software replay of queued
-        * IPIs, at the cost of call overhead on each local_irq_enable()
-        */
-       smtc_ipi_replay();
-#endif
-       __asm__ __volatile__(
-               "arch_local_irq_enable"
-               : /* no outputs */
-               : /* no inputs */
-               : "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
 __asm__(
        "       .macro  arch_local_irq_disable\n"
        "       .set    push                                            \n"
        "       .set    noat                                            \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    $1, $2, 1                                       \n"
-       "       ori     $1, 0x400                                       \n"
-       "       .set    noreorder                                       \n"
-       "       mtc0    $1, $2, 1                                       \n"
-#elif defined(CONFIG_CPU_MIPSR2)
        "       di                                                      \n"
-#else
-       "       mfc0    $1,$12                                          \n"
-       "       ori     $1,0x1f                                         \n"
-       "       xori    $1,0x1f                                         \n"
-       "       .set    noreorder                                       \n"
-       "       mtc0    $1,$12                                          \n"
-#endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
                : "memory");
 }
 
-__asm__(
-       "       .macro  arch_local_save_flags flags                     \n"
-       "       .set    push                                            \n"
-       "       .set    reorder                                         \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    \\flags, $2, 1                                  \n"
-#else
-       "       mfc0    \\flags, $12                                    \n"
-#endif
-       "       .set    pop                                             \n"
-       "       .endm                                                   \n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
-       unsigned long flags;
-       asm volatile("arch_local_save_flags %0" : "=r" (flags));
-       return flags;
-}
 
 __asm__(
        "       .macro  arch_local_irq_save result                      \n"
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    \\result, $2, 1                                 \n"
-       "       ori     $1, \\result, 0x400                             \n"
-       "       .set    noreorder                                       \n"
-       "       mtc0    $1, $2, 1                                       \n"
-       "       andi    \\result, \\result, 0x400                       \n"
-#elif defined(CONFIG_CPU_MIPSR2)
        "       di      \\result                                        \n"
        "       andi    \\result, 1                                     \n"
-#else
-       "       mfc0    \\result, $12                                   \n"
-       "       ori     $1, \\result, 0x1f                              \n"
-       "       xori    $1, 0x1f                                        \n"
-       "       .set    noreorder                                       \n"
-       "       mtc0    $1, $12                                         \n"
-#endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
        return flags;
 }
 
+
 __asm__(
        "       .macro  arch_local_irq_restore flags                    \n"
        "       .set    push                                            \n"
        "       .set    noreorder                                       \n"
        "       .set    noat                                            \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-       "mfc0   $1, $2, 1                                               \n"
-       "andi   \\flags, 0x400                                          \n"
-       "ori    $1, 0x400                                               \n"
-       "xori   $1, 0x400                                               \n"
-       "or     \\flags, $1                                             \n"
-       "mtc0   \\flags, $2, 1                                          \n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
        /*
         * Slow, but doesn't suffer from a relatively unlikely race
         * condition we're having since days 1.
         */
        "       beqz    \\flags, 1f                                     \n"
-       "        di                                                     \n"
+       "       di                                                      \n"
        "       ei                                                      \n"
        "1:                                                             \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
        /*
         * Fast, dangerous.  Life is fun, life is good.
         */
        "       mfc0    $1, $12                                         \n"
        "       ins     $1, \\flags, 0, 1                               \n"
        "       mtc0    $1, $12                                         \n"
-#else
-       "       mfc0    $1, $12                                         \n"
-       "       andi    \\flags, 1                                      \n"
-       "       ori     $1, 0x1f                                        \n"
-       "       xori    $1, 0x1f                                        \n"
-       "       or      \\flags, $1                                     \n"
-       "       mtc0    \\flags, $12                                    \n"
 #endif
        "       irq_disable_hazard                                      \n"
        "       .set    pop                                             \n"
        "       .endm                                                   \n");
 
-
 static inline void arch_local_irq_restore(unsigned long flags)
 {
        unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC kernel needs to do a software replay of queued
-        * IPIs, at the cost of branch and call overhead on each
-        * local_irq_restore()
-        */
-       if (unlikely(!(flags & 0x0400)))
-               smtc_ipi_replay();
-#endif
-
        __asm__ __volatile__(
                "arch_local_irq_restore\t%0"
                : "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
                : "0" (flags)
                : "memory");
 }
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+       "       .macro  arch_local_irq_enable                           \n"
+       "       .set    push                                            \n"
+       "       .set    reorder                                         \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    $1, $2, 1       # SMTC - clear TCStatus.IXMT    \n"
+       "       ori     $1, 0x400                                       \n"
+       "       xori    $1, 0x400                                       \n"
+       "       mtc0    $1, $2, 1                                       \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+       "       ei                                                      \n"
+#else
+       "       mfc0    $1,$12                                          \n"
+       "       ori     $1,0x1f                                         \n"
+       "       xori    $1,0x1e                                         \n"
+       "       mtc0    $1,$12                                          \n"
+#endif
+       "       irq_enable_hazard                                       \n"
+       "       .set    pop                                             \n"
+       "       .endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC kernel needs to do a software replay of queued
+        * IPIs, at the cost of call overhead on each local_irq_enable()
+        */
+       smtc_ipi_replay();
+#endif
+       __asm__ __volatile__(
+               "arch_local_irq_enable"
+               : /* no outputs */
+               : /* no inputs */
+               : "memory");
+}
+
+
+__asm__(
+       "       .macro  arch_local_save_flags flags                     \n"
+       "       .set    push                                            \n"
+       "       .set    reorder                                         \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    \\flags, $2, 1                                  \n"
+#else
+       "       mfc0    \\flags, $12                                    \n"
+#endif
+       "       .set    pop                                             \n"
+       "       .endm                                                   \n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+       unsigned long flags;
+       asm volatile("arch_local_save_flags %0" : "=r" (flags));
+       return flags;
+}
+
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 #endif
 }
 
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
 
 /*
  * Do the CPU's IRQ-state tracing from assembly code.
index 8debe9e..18806a5 100644 (file)
@@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_LOAD_WATCH         25      /* If set, load watch registers */
 #define TIF_SYSCALL_TRACE      31      /* syscall trace active */
 
-#ifdef CONFIG_MIPS32_O32
-#define TIF_32BIT TIF_32BIT_REGS
-#elif defined(CONFIG_MIPS32_N32)
-#define TIF_32BIT _TIF_32BIT_ADDR
-#endif /* CONFIG_MIPS32_O32 */
-
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING                (1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
index a53f8ec..290dc6a 100644 (file)
@@ -79,7 +79,7 @@ static struct resource data_resource = { .name = "Kernel data", };
 void __init add_memory_region(phys_t start, phys_t size, long type)
 {
        int x = boot_mem_map.nr_map;
-       struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;
+       int i;
 
        /* Sanity check */
        if (start + size < start) {
@@ -88,15 +88,29 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
        }
 
        /*
-        * Try to merge with previous entry if any.  This is far less than
-        * perfect but is sufficient for most real world cases.
+        * Try to merge with existing entry, if any.
         */
-       if (x && prev->addr + prev->size == start && prev->type == type) {
-               prev->size += size;
+       for (i = 0; i < boot_mem_map.nr_map; i++) {
+               struct boot_mem_map_entry *entry = boot_mem_map.map + i;
+               unsigned long top;
+
+               if (entry->type != type)
+                       continue;
+
+               if (start + size < entry->addr)
+                       continue;                       /* no overlap */
+
+               if (entry->addr + entry->size < start)
+                       continue;                       /* no overlap */
+
+               top = max(entry->addr + entry->size, start + size);
+               entry->addr = min(entry->addr, start);
+               entry->size = top - entry->addr;
+
                return;
        }
 
-       if (x == BOOT_MEM_MAP_MAX) {
+       if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
                pr_err("Ooops! Too many entries in the memory map!\n");
                return;
        }
index c4a82e8..eeddc58 100644 (file)
@@ -2,8 +2,9 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y  += csum_partial.o delay.o memcpy.o memset.o \
-          strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+lib-y  += bitops.o csum_partial.o delay.o memcpy.o memset.o \
+          mips-atomic.o strlen_user.o strncpy_user.o \
+          strnlen_user.o uncached.o
 
 obj-y                  += iomap.o
 obj-$(CONFIG_PCI)      += iomap-pci.o
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
new file mode 100644 (file)
index 0000000..239a9c9
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/irqflags.h>
+#include <linux/export.h>
+
+
+/**
+ * __mips_set_bit - Atomically set a bit in memory.  This is called by
+ * set_bit() if it cannot find a faster solution.
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       *a |= mask;
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_set_bit);
+
+
+/**
+ * __mips_clear_bit - Clears a bit in memory.  This is called by clear_bit() if
+ * it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       *a &= ~mask;
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_clear_bit);
+
+
+/**
+ * __mips_change_bit - Toggle a bit in memory.  This is called by change_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       *a ^= mask;
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_change_bit);
+
+
+/**
+ * __mips_test_and_set_bit - Set a bit and return its old value.  This is
+ * called by test_and_set_bit() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit(unsigned long nr,
+                           volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+       unsigned long res;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       res = (mask & *a);
+       *a |= mask;
+       raw_local_irq_restore(flags);
+       return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit);
+
+
+/**
+ * __mips_test_and_set_bit_lock - Set a bit and return its old value.  This is
+ * called by test_and_set_bit_lock() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit_lock(unsigned long nr,
+                                volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+       unsigned long res;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       res = (mask & *a);
+       *a |= mask;
+       raw_local_irq_restore(flags);
+       return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
+
+
+/**
+ * __mips_test_and_clear_bit - Clear a bit and return its old value.  This is
+ * called by test_and_clear_bit() if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+       unsigned long res;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       res = (mask & *a);
+       *a &= ~mask;
+       raw_local_irq_restore(flags);
+       return res;
+}
+EXPORT_SYMBOL(__mips_test_and_clear_bit);
+
+
+/**
+ * __mips_test_and_change_bit - Change a bit and return its old value.  This is
+ * called by test_and_change_bit() if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+       volatile unsigned long *a = addr;
+       unsigned bit = nr & SZLONG_MASK;
+       unsigned long mask;
+       unsigned long flags;
+       unsigned long res;
+
+       a += nr >> SZLONG_LOG;
+       mask = 1UL << bit;
+       raw_local_irq_save(flags);
+       res = (mask & *a);
+       *a ^= mask;
+       raw_local_irq_restore(flags);
+       return res;
+}
+EXPORT_SYMBOL(__mips_test_and_change_bit);
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644 (file)
index 0000000..cd160be
--- /dev/null
@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+       "       .macro  arch_local_irq_disable\n"
+       "       .set    push                                            \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    $1, $2, 1                                       \n"
+       "       ori     $1, 0x400                                       \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1, $2, 1                                       \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+       /* see irqflags.h for inline function */
+#else
+       "       mfc0    $1,$12                                          \n"
+       "       ori     $1,0x1f                                         \n"
+       "       xori    $1,0x1f                                         \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1,$12                                          \n"
+#endif
+       "       irq_disable_hazard                                      \n"
+       "       .set    pop                                             \n"
+       "       .endm                                                   \n");
+
+notrace void arch_local_irq_disable(void)
+{
+       preempt_disable();
+       __asm__ __volatile__(
+               "arch_local_irq_disable"
+               : /* no outputs */
+               : /* no inputs */
+               : "memory");
+       preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+       "       .macro  arch_local_irq_save result                      \n"
+       "       .set    push                                            \n"
+       "       .set    reorder                                         \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    \\result, $2, 1                                 \n"
+       "       ori     $1, \\result, 0x400                             \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1, $2, 1                                       \n"
+       "       andi    \\result, \\result, 0x400                       \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+       /* see irqflags.h for inline function */
+#else
+       "       mfc0    \\result, $12                                   \n"
+       "       ori     $1, \\result, 0x1f                              \n"
+       "       xori    $1, 0x1f                                        \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1, $12                                         \n"
+#endif
+       "       irq_disable_hazard                                      \n"
+       "       .set    pop                                             \n"
+       "       .endm                                                   \n");
+
+notrace unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
+       preempt_disable();
+       asm volatile("arch_local_irq_save\t%0"
+                    : "=r" (flags)
+                    : /* no inputs */
+                    : "memory");
+       preempt_enable();
+       return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+       "       .macro  arch_local_irq_restore flags                    \n"
+       "       .set    push                                            \n"
+       "       .set    noreorder                                       \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "mfc0   $1, $2, 1                                               \n"
+       "andi   \\flags, 0x400                                          \n"
+       "ori    $1, 0x400                                               \n"
+       "xori   $1, 0x400                                               \n"
+       "or     \\flags, $1                                             \n"
+       "mtc0   \\flags, $2, 1                                          \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+       /* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+       /* see irqflags.h for inline function */
+#else
+       "       mfc0    $1, $12                                         \n"
+       "       andi    \\flags, 1                                      \n"
+       "       ori     $1, 0x1f                                        \n"
+       "       xori    $1, 0x1f                                        \n"
+       "       or      \\flags, $1                                     \n"
+       "       mtc0    \\flags, $12                                    \n"
+#endif
+       "       irq_disable_hazard                                      \n"
+       "       .set    pop                                             \n"
+       "       .endm                                                   \n");
+
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+       unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC kernel needs to do a software replay of queued
+        * IPIs, at the cost of branch and call overhead on each
+        * local_irq_restore()
+        */
+       if (unlikely(!(flags & 0x0400)))
+               smtc_ipi_replay();
+#endif
+       preempt_disable();
+       __asm__ __volatile__(
+               "arch_local_irq_restore\t%0"
+               : "=r" (__tmp1)
+               : "0" (flags)
+               : "memory");
+       preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+notrace void __arch_local_irq_restore(unsigned long flags)
+{
+       unsigned long __tmp1;
+
+       preempt_disable();
+       __asm__ __volatile__(
+               "arch_local_irq_restore\t%0"
+               : "=r" (__tmp1)
+               : "0" (flags)
+               : "memory");
+       preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
index 80562b8..7473217 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/platform_device.h>
+#include <asm/mips-boards/maltaint.h>
 #include <mtd/mtd-abi.h>
 
 #define SMC_PORT(base, int)                                            \
@@ -48,7 +49,7 @@ static struct plat_serial8250_port uart8250_data[] = {
        SMC_PORT(0x2F8, 3),
        {
                .mapbase        = 0x1f000900,   /* The CBUS UART */
-               .irq            = MIPS_CPU_IRQ_BASE + 2,
+               .irq            = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
                .uartclk        = 3686400,      /* Twice the usual clk! */
                .iotype         = UPIO_MEM32,
                .flags          = CBUS_UART_FLAGS,
index fd49aed..5dede04 100644 (file)
@@ -65,7 +65,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
 {
        compat_sigset_t s;
 
-       if (sz != sizeof *set) panic("put_sigset32()");
+       if (sz != sizeof *set)
+               return -EINVAL;
        sigset_64to32(&s, set);
 
        return copy_to_user(up, &s, sizeof s);
@@ -77,7 +78,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
        compat_sigset_t s;
        int r;
 
-       if (sz != sizeof *set) panic("put_sigset32()");
+       if (sz != sizeof *set)
+               return -EINVAL;
 
        if ((r = copy_from_user(&s, up, sz)) == 0) {
                sigset_32to64(set, &s);
index 7426e40..f76c108 100644 (file)
@@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping,
        struct vm_area_struct *vma;
        int offset = mapping ? get_offset(mapping) : 0;
 
+       offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
+
        addr = DCACHE_ALIGN(addr - offset) + offset;
 
        for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
index 7ab286a..39ed65a 100644 (file)
                        interrupts = <2 7 0>;
                };
 
+               sclpc@3c00 {
+                       compatible = "fsl,mpc5200-lpbfifo";
+                       reg = <0x3c00 0x60>;
+                       interrupts = <2 23 0>;
+               };
+
                i2c@3d00 {
                        #address-cells = <1>;
                        #size-cells = <0>;
index 3444eb8..24f6680 100644 (file)
                                reg = <0>;
                        };
                };
-
-               sclpc@3c00 {
-                       compatible = "fsl,mpc5200-lpbfifo";
-                       reg = <0x3c00 0x60>;
-                       interrupts = <3 23 0>;
-               };
        };
 
        localbus {
index 9e35499..96512c0 100644 (file)
@@ -59,7 +59,7 @@
                        #gpio-cells = <2>;
                };
 
-               psc@2000 { /* PSC1 in ac97 mode */
+               audioplatform: psc@2000 { /* PSC1 in ac97 mode */
                        compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97";
                        cell-index = <0>;
                };
        localbus {
                status = "disabled";
        };
+
+       sound {
+               compatible = "phytec,pcm030-audio-fabric";
+               asoc-platform = <&audioplatform>;
+       };
 };
index 8520b58..b89ef65 100644 (file)
@@ -372,10 +372,11 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
        case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break;
        case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
        case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
-       default:
-               pr_err("%s: invalid irq: virq=%i, l1=%i, l2=%i\n",
-                      __func__, virq, l1irq, l2irq);
-               return -EINVAL;
+       case MPC52xx_IRQ_L1_CRIT:
+               pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
+                       __func__, l2irq);
+               irq_set_chip(virq, &no_irq_chip);
+               return 0;
        }
 
        irq_set_chip_and_handler(virq, irqchip, handle_level_irq);
index 797cd18..d16c8de 100644 (file)
@@ -449,7 +449,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
                        if (list_empty(&pe->edevs)) {
                                cnt = 0;
                                list_for_each_entry(child, &pe->child_list, child) {
-                                       if (!(pe->type & EEH_PE_INVALID)) {
+                                       if (!(child->type & EEH_PE_INVALID)) {
                                                cnt++;
                                                break;
                                        }
index d19f497..e5b0847 100644 (file)
@@ -220,7 +220,8 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
 
        /* Get the top level device in the PE */
        edev = of_node_to_eeh_dev(dn);
-       edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
+       if (edev->pe)
+               edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
        dn = eeh_dev_to_of_node(edev);
        if (!dn)
                return NULL;
index 5dba755..d385f39 100644 (file)
@@ -96,6 +96,7 @@ config S390
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_CMPXCHG_LOCAL
        select HAVE_CMPXCHG_DOUBLE
+       select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_VIRT_CPU_ACCOUNTING
        select VIRT_CPU_ACCOUNTING
        select ARCH_DISCARD_MEMBLOCK
index 55bde60..ad2b924 100644 (file)
@@ -9,6 +9,8 @@
 
 #define LPM_ANYPATH 0xff
 #define __MAX_CSSID 0
+#define __MAX_SUBCHANNEL 65535
+#define __MAX_SSID 3
 
 #include <asm/scsw.h>
 
index a34a9d6..18cd6b5 100644 (file)
@@ -20,7 +20,7 @@
 #define PSW32_MASK_CC          0x00003000UL
 #define PSW32_MASK_PM          0x00000f00UL
 
-#define PSW32_MASK_USER                0x00003F00UL
+#define PSW32_MASK_USER                0x0000FF00UL
 
 #define PSW32_ADDR_AMODE       0x80000000UL
 #define PSW32_ADDR_INSN                0x7FFFFFFFUL
index dd647c9..2d3b7cb 100644 (file)
@@ -506,12 +506,15 @@ static inline int pud_bad(pud_t pud)
 
 static inline int pmd_present(pmd_t pmd)
 {
-       return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
+       unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
+       return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
+              !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
 }
 
 static inline int pmd_none(pmd_t pmd)
 {
-       return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
+       return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
+              !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
 }
 
 static inline int pmd_large(pmd_t pmd)
@@ -1223,6 +1226,11 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define SEGMENT_NONE   __pgprot(_HPAGE_TYPE_NONE)
+#define SEGMENT_RO     __pgprot(_HPAGE_TYPE_RO)
+#define SEGMENT_RW     __pgprot(_HPAGE_TYPE_RW)
+
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
 
@@ -1242,16 +1250,15 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
-       unsigned long pgprot_pmd = 0;
-
-       if (pgprot_val(pgprot) & _PAGE_INVALID) {
-               if (pgprot_val(pgprot) & _PAGE_SWT)
-                       pgprot_pmd |= _HPAGE_TYPE_NONE;
-               pgprot_pmd |= _SEGMENT_ENTRY_INV;
-       }
-       if (pgprot_val(pgprot) & _PAGE_RO)
-               pgprot_pmd |= _SEGMENT_ENTRY_RO;
-       return pgprot_pmd;
+       /*
+        * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
+        * Convert to segment table entry format.
+        */
+       if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
+               return pgprot_val(SEGMENT_NONE);
+       if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
+               return pgprot_val(SEGMENT_RO);
+       return pgprot_val(SEGMENT_RW);
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
@@ -1269,7 +1276,9 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
-       pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+       /* Do not clobber _HPAGE_TYPE_NONE pages! */
+       if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
+               pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
        return pmd;
 }
 
index 9ca3053..9935cbd 100644 (file)
@@ -8,6 +8,9 @@ struct cpu;
 
 #ifdef CONFIG_SCHED_BOOK
 
+extern unsigned char cpu_socket_id[NR_CPUS];
+#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
+
 extern unsigned char cpu_core_id[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 
index 705588a..a5ca214 100644 (file)
@@ -239,7 +239,7 @@ typedef struct
 #define PSW_MASK_EA            0x00000000UL
 #define PSW_MASK_BA            0x00000000UL
 
-#define PSW_MASK_USER          0x00003F00UL
+#define PSW_MASK_USER          0x0000FF00UL
 
 #define PSW_ADDR_AMODE         0x80000000UL
 #define PSW_ADDR_INSN          0x7FFFFFFFUL
@@ -269,7 +269,7 @@ typedef struct
 #define PSW_MASK_EA            0x0000000100000000UL
 #define PSW_MASK_BA            0x0000000080000000UL
 
-#define PSW_MASK_USER          0x00003F8180000000UL
+#define PSW_MASK_USER          0x0000FF8180000000UL
 
 #define PSW_ADDR_AMODE         0x0000000000000000UL
 #define PSW_ADDR_INSN          0xFFFFFFFFFFFFFFFFUL
index a1e8a86..593fcc9 100644 (file)
@@ -309,6 +309,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
                (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
                (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
+       /* Check for invalid user address space control. */
+       if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
+               regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+                       (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
        for (i = 0; i < NUM_GPRS; i++)
                regs->gprs[i] = (__u64) regs32.gprs[i];
@@ -481,7 +485,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
 
        /* Set up registers for signal handler */
        regs->gprs[15] = (__force __u64) frame;
-       regs->psw.mask |= PSW_MASK_BA;          /* force amode 31 */
+       /* Force 31 bit amode and default user address space control. */
+       regs->psw.mask = PSW_MASK_BA |
+               (psw_user_bits & PSW_MASK_ASC) |
+               (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (__force __u64) ka->sa.sa_handler;
 
        regs->gprs[2] = map_signal(sig);
@@ -549,7 +556,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
 
        /* Set up registers for signal handler */
        regs->gprs[15] = (__force __u64) frame;
-       regs->psw.mask |= PSW_MASK_BA;          /* force amode 31 */
+       /* Force 31 bit amode and default user address space control. */
+       regs->psw.mask = PSW_MASK_BA |
+               (psw_user_bits & PSW_MASK_ASC) |
+               (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (__u64) ka->sa.sa_handler;
 
        regs->gprs[2] = map_signal(sig);
index bf05389..b6506ee 100644 (file)
@@ -44,6 +44,12 @@ _sclp_wait_int:
 #endif
        mvc     .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
        mvc     0(16,%r8),0(%r9)
+#ifdef CONFIG_64BIT
+       epsw    %r6,%r7                         # set current addressing mode
+       nill    %r6,0x1                         # in new psw (31 or 64 bit mode)
+       nilh    %r7,0x8000
+       stm     %r6,%r7,0(%r8)
+#endif
        lhi     %r6,0x0200                      # cr mask for ext int (cr0.54)
        ltr     %r2,%r2
        jz      .LsetctS1
@@ -87,7 +93,7 @@ _sclp_wait_int:
        .long   0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
 #ifdef CONFIG_64BIT
 .LextpswS1_64:
-       .quad   0x0000000180000000, .LwaitS1    # PSW to handle ext int, 64 bit
+       .quad   0, .LwaitS1                     # PSW to handle ext int, 64 bit
 #endif
 .LwaitpswS1:
        .long   0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
index c13a2a3..d1259d8 100644 (file)
@@ -136,6 +136,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
        /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
                (user_sregs.regs.psw.mask & PSW_MASK_USER);
+       /* Check for invalid user address space control. */
+       if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
+               regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+                       (regs->psw.mask & ~PSW_MASK_ASC);
        /* Check for invalid amode */
        if (regs->psw.mask & PSW_MASK_EA)
                regs->psw.mask |= PSW_MASK_BA;
@@ -273,7 +277,10 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 
        /* Set up registers for signal handler */
        regs->gprs[15] = (unsigned long) frame;
-       regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA;    /* 64 bit amode */
+       /* Force default amode and default user address space control. */
+       regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
+               (psw_user_bits & PSW_MASK_ASC) |
+               (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
 
        regs->gprs[2] = map_signal(sig);
@@ -346,7 +353,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 
        /* Set up registers for signal handler */
        regs->gprs[15] = (unsigned long) frame;
-       regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA;    /* 64 bit amode */
+       /* Force default amode and default user address space control. */
+       regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
+               (psw_user_bits & PSW_MASK_ASC) |
+               (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
 
        regs->gprs[2] = map_signal(sig);
index 54d93f4..dd55f7c 100644 (file)
@@ -40,6 +40,7 @@ static DEFINE_SPINLOCK(topology_lock);
 static struct mask_info core_info;
 cpumask_t cpu_core_map[NR_CPUS];
 unsigned char cpu_core_id[NR_CPUS];
+unsigned char cpu_socket_id[NR_CPUS];
 
 static struct mask_info book_info;
 cpumask_t cpu_book_map[NR_CPUS];
@@ -83,11 +84,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
                        cpumask_set_cpu(lcpu, &book->mask);
                        cpu_book_id[lcpu] = book->id;
                        cpumask_set_cpu(lcpu, &core->mask);
+                       cpu_core_id[lcpu] = rcpu;
                        if (one_core_per_cpu) {
-                               cpu_core_id[lcpu] = rcpu;
+                               cpu_socket_id[lcpu] = rcpu;
                                core = core->next;
                        } else {
-                               cpu_core_id[lcpu] = core->id;
+                               cpu_socket_id[lcpu] = core->id;
                        }
                        smp_cpu_set_polarization(lcpu, tl_cpu->pp);
                }
index 2d37bb8..9017a63 100644 (file)
@@ -39,7 +39,7 @@ static __always_inline unsigned long follow_table(struct mm_struct *mm,
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return -0x10UL;
-       if (pmd_huge(*pmd)) {
+       if (pmd_large(*pmd)) {
                if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
                        return -0x04UL;
                return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
index 60acb93..1f5315d 100644 (file)
@@ -126,7 +126,7 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
                 */
                if (pmd_none(pmd) || pmd_trans_splitting(pmd))
                        return 0;
-               if (unlikely(pmd_huge(pmd))) {
+               if (unlikely(pmd_large(pmd))) {
                        if (!gup_huge_pmd(pmdp, pmd, addr, next,
                                          write, pages, nr))
                                return 0;
@@ -180,8 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;
-       if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-                                       (void __user *)start, len)))
+       if ((end < start) || (end > TASK_SIZE))
                return 0;
 
        local_irq_save(flags);
@@ -229,7 +228,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;
-       if (end < start)
+       if ((end < start) || (end > TASK_SIZE))
                goto slow_irqon;
 
        /*
index b6b442b..9f2edb5 100644 (file)
@@ -20,6 +20,7 @@ config SPARC
        select HAVE_ARCH_TRACEHOOK
        select SYSCTL_EXCEPTION_TRACE
        select ARCH_WANT_OPTIONAL_GPIOLIB
+       select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
        select RTC_CLASS
        select RTC_DRV_M48T59
        select HAVE_IRQ_WORK
index 6ae1ad5..5d469d8 100644 (file)
@@ -13,13 +13,13 @@ obj-$(CONFIG_CRYPTO_DES_SPARC64) += camellia-sparc64.o
 
 obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o
 
-sha1-sparc64-y := sha1_asm.o sha1_glue.o crop_devid.o
-sha256-sparc64-y := sha256_asm.o sha256_glue.o crop_devid.o
-sha512-sparc64-y := sha512_asm.o sha512_glue.o crop_devid.o
-md5-sparc64-y := md5_asm.o md5_glue.o crop_devid.o
+sha1-sparc64-y := sha1_asm.o sha1_glue.o
+sha256-sparc64-y := sha256_asm.o sha256_glue.o
+sha512-sparc64-y := sha512_asm.o sha512_glue.o
+md5-sparc64-y := md5_asm.o md5_glue.o
 
-aes-sparc64-y := aes_asm.o aes_glue.o crop_devid.o
-des-sparc64-y := des_asm.o des_glue.o crop_devid.o
-camellia-sparc64-y := camellia_asm.o camellia_glue.o crop_devid.o
+aes-sparc64-y := aes_asm.o aes_glue.o
+des-sparc64-y := des_asm.o des_glue.o
+camellia-sparc64-y := camellia_asm.o camellia_glue.o
 
-crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o crop_devid.o
+crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o
index 8f1c998..3965d1d 100644 (file)
@@ -475,3 +475,5 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
 
 MODULE_ALIAS("aes");
+
+#include "crop_devid.c"
index 42905c0..62c89af 100644 (file)
@@ -320,3 +320,5 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
 
 MODULE_ALIAS("aes");
+
+#include "crop_devid.c"
index 0bd89ce..5162fad 100644 (file)
@@ -177,3 +177,5 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
 
 MODULE_ALIAS("crc32c");
+
+#include "crop_devid.c"
index c4940c2..41524ce 100644 (file)
@@ -527,3 +527,5 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
 
 MODULE_ALIAS("des");
+
+#include "crop_devid.c"
index 603d723..09a9ea1 100644 (file)
@@ -186,3 +186,5 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
 
 MODULE_ALIAS("md5");
+
+#include "crop_devid.c"
index 2bbb20b..6cd5f29 100644 (file)
@@ -181,3 +181,5 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
 
 MODULE_ALIAS("sha1");
+
+#include "crop_devid.c"
index 591e656..04f555a 100644 (file)
@@ -239,3 +239,5 @@ MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 op
 
 MODULE_ALIAS("sha224");
 MODULE_ALIAS("sha256");
+
+#include "crop_devid.c"
index 486f0a2..f04d199 100644 (file)
@@ -224,3 +224,5 @@ MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 op
 
 MODULE_ALIAS("sha384");
 MODULE_ALIAS("sha512");
+
+#include "crop_devid.c"
index ce35a1c..be56a24 100644 (file)
@@ -1,7 +1,7 @@
 /* atomic.h: Thankfully the V9 is at least reasonable for this
  *           stuff.
  *
- * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com)
  */
 
 #ifndef __ARCH_SPARC64_ATOMIC__
@@ -106,6 +106,8 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
+extern long atomic64_dec_if_positive(atomic64_t *v);
+
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()    barrier()
 #define smp_mb__after_atomic_dec()     barrier()
index db3af0d..4e02086 100644 (file)
@@ -1,6 +1,46 @@
 #ifndef _SPARC64_BACKOFF_H
 #define _SPARC64_BACKOFF_H
 
+/* The macros in this file implement an exponential backoff facility
+ * for atomic operations.
+ *
+ * When multiple threads compete on an atomic operation, it is
+ * possible for one thread to be continually denied a successful
+ * completion of the compare-and-swap instruction.  Heavily
+ * threaded cpu implementations like Niagara can compound this
+ * problem even further.
+ *
+ * When an atomic operation fails and needs to be retried, we spin a
+ * certain number of times.  At each subsequent failure of the same
+ * operation we double the spin count, realizing an exponential
+ * backoff.
+ *
+ * When we spin, we try to use an operation that will cause the
+ * current cpu strand to block, and therefore make the core fully
+ * available to any other other runnable strands.  There are two
+ * options, based upon cpu capabilities.
+ *
+ * On all cpus prior to SPARC-T4 we do three dummy reads of the
+ * condition code register.  Each read blocks the strand for something
+ * between 40 and 50 cpu cycles.
+ *
+ * For SPARC-T4 and later we have a special "pause" instruction
+ * available.  This is implemented using writes to register %asr27.
+ * The cpu will block the number of cycles written into the register,
+ * unless a disrupting trap happens first.  SPARC-T4 specifically
+ * implements pause with a granularity of 8 cycles.  Each strand has
+ * an internal pause counter which decrements every 8 cycles.  So the
+ * chip shifts the %asr27 value down by 3 bits, and writes the result
+ * into the pause counter.  If a value smaller than 8 is written, the
+ * chip blocks for 1 cycle.
+ *
+ * To achieve the same amount of backoff as the three %ccr reads give
+ * on earlier chips, we shift the backoff value up by 7 bits.  (Three
+ * %ccr reads block for about 128 cycles, 1 << 7 == 128) We write the
+ * whole amount we want to block into the pause register, rather than
+ * loop writing 128 each time.
+ */
+
 #define BACKOFF_LIMIT  (4 * 1024)
 
 #ifdef CONFIG_SMP
 #define BACKOFF_LABEL(spin_label, continue_label) \
        spin_label
 
-#define BACKOFF_SPIN(reg, tmp, label)  \
-       mov     reg, tmp; \
-88:    brnz,pt tmp, 88b; \
-        sub    tmp, 1, tmp; \
-       set     BACKOFF_LIMIT, tmp; \
-       cmp     reg, tmp; \
-       bg,pn   %xcc, label; \
-        nop; \
-       ba,pt   %xcc, label; \
-        sllx   reg, 1, reg;
+#define BACKOFF_SPIN(reg, tmp, label)          \
+       mov             reg, tmp;               \
+88:    rd              %ccr, %g0;              \
+       rd              %ccr, %g0;              \
+       rd              %ccr, %g0;              \
+       .section        .pause_3insn_patch,"ax";\
+       .word           88b;                    \
+       sllx            tmp, 7, tmp;            \
+       wr              tmp, 0, %asr27;         \
+       clr             tmp;                    \
+       .previous;                              \
+       brnz,pt         tmp, 88b;               \
+        sub            tmp, 1, tmp;            \
+       set             BACKOFF_LIMIT, tmp;     \
+       cmp             reg, tmp;               \
+       bg,pn           %xcc, label;            \
+        nop;                                   \
+       ba,pt           %xcc, label;            \
+        sllx           reg, 1, reg;
 
 #else
 
index cef99fb..830502f 100644 (file)
@@ -232,9 +232,10 @@ static inline void __user *arch_compat_alloc_user_space(long len)
        struct pt_regs *regs = current_thread_info()->kregs;
        unsigned long usp = regs->u_regs[UREG_I6];
 
-       if (!(test_thread_flag(TIF_32BIT)))
+       if (test_thread_64bit_stack(usp))
                usp += STACK_BIAS;
-       else
+
+       if (test_thread_flag(TIF_32BIT))
                usp &= 0xffffffffUL;
 
        usp -= len;
index 4e5a483..721e25f 100644 (file)
@@ -196,7 +196,22 @@ extern unsigned long get_wchan(struct task_struct *task);
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
 
-#define cpu_relax()    barrier()
+/* Please see the commentary in asm/backoff.h for a description of
+ * what these instructions are doing and how they have been chosen.
+ * To make a long story short, we are trying to yield the current cpu
+ * strand during busy loops.
+ */
+#define cpu_relax()    asm volatile("\n99:\n\t"                        \
+                                    "rd        %%ccr, %%g0\n\t"        \
+                                    "rd        %%ccr, %%g0\n\t"        \
+                                    "rd        %%ccr, %%g0\n\t"        \
+                                    ".section  .pause_3insn_patch,\"ax\"\n\t"\
+                                    ".word     99b\n\t"                \
+                                    "wr        %%g0, 128, %%asr27\n\t" \
+                                    "nop\n\t"                          \
+                                    "nop\n\t"                          \
+                                    ".previous"                        \
+                                    ::: "memory")
 
 /* Prefetch support.  This is tuned for UltraSPARC-III and later.
  * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
index c287651..67c6257 100644 (file)
@@ -63,5 +63,13 @@ extern char *of_console_options;
 extern void irq_trans_init(struct device_node *dp);
 extern char *build_path_component(struct device_node *dp);
 
+/* SPARC has local implementations */
+extern int of_address_to_resource(struct device_node *dev, int index,
+                                 struct resource *r);
+#define of_address_to_resource of_address_to_resource
+
+void __iomem *of_iomap(struct device_node *node, int index);
+#define of_iomap of_iomap
+
 #endif /* __KERNEL__ */
 #endif /* _SPARC_PROM_H */
index 4e22766..a3fe4dc 100644 (file)
@@ -259,6 +259,11 @@ static inline bool test_and_clear_restore_sigmask(void)
 
 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
 
+#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
+#define test_thread_64bit_stack(__SP) \
+       ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \
+        false : true)
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
index 48f2807..71b5a67 100644 (file)
@@ -372,7 +372,9 @@ etrap_spill_fixup_64bit:                            \
 
 /* Normal 32bit spill */
 #define SPILL_2_GENERIC(ASI)                           \
-       srl     %sp, 0, %sp;                            \
+       and     %sp, 1, %g3;                            \
+       brnz,pn %g3, (. - (128 + 4));                   \
+        srl    %sp, 0, %sp;                            \
        stwa    %l0, [%sp + %g0] ASI;                   \
        mov     0x04, %g3;                              \
        stwa    %l1, [%sp + %g3] ASI;                   \
@@ -398,14 +400,16 @@ etrap_spill_fixup_64bit:                          \
        stwa    %i6, [%g1 + %g0] ASI;                   \
        stwa    %i7, [%g1 + %g3] ASI;                   \
        saved;                                          \
-        retry; nop; nop;                               \
+        retry;                                         \
        b,a,pt  %xcc, spill_fixup_dax;                  \
        b,a,pt  %xcc, spill_fixup_mna;                  \
        b,a,pt  %xcc, spill_fixup;
 
 #define SPILL_2_GENERIC_ETRAP          \
 etrap_user_spill_32bit:                        \
-       srl     %sp, 0, %sp;            \
+       and     %sp, 1, %g3;            \
+       brnz,pn %g3, etrap_user_spill_64bit;    \
+        srl    %sp, 0, %sp;            \
        stwa    %l0, [%sp + 0x00] %asi; \
        stwa    %l1, [%sp + 0x04] %asi; \
        stwa    %l2, [%sp + 0x08] %asi; \
@@ -427,7 +431,7 @@ etrap_user_spill_32bit:                     \
        ba,pt   %xcc, etrap_save;       \
         wrpr   %g1, %cwp;              \
        nop; nop; nop; nop;             \
-       nop; nop; nop; nop;             \
+       nop; nop;                       \
        ba,a,pt %xcc, etrap_spill_fixup_32bit; \
        ba,a,pt %xcc, etrap_spill_fixup_32bit; \
        ba,a,pt %xcc, etrap_spill_fixup_32bit;
@@ -592,7 +596,9 @@ user_rtt_fill_64bit:                                        \
 
 /* Normal 32bit fill */
 #define FILL_2_GENERIC(ASI)                            \
-       srl     %sp, 0, %sp;                            \
+       and     %sp, 1, %g3;                            \
+       brnz,pn %g3, (. - (128 + 4));                   \
+        srl    %sp, 0, %sp;                            \
        lduwa   [%sp + %g0] ASI, %l0;                   \
        mov     0x04, %g2;                              \
        mov     0x08, %g3;                              \
@@ -616,14 +622,16 @@ user_rtt_fill_64bit:                                      \
        lduwa   [%g1 + %g3] ASI, %i6;                   \
        lduwa   [%g1 + %g5] ASI, %i7;                   \
        restored;                                       \
-       retry; nop; nop; nop; nop;                      \
+       retry; nop; nop;                                \
        b,a,pt  %xcc, fill_fixup_dax;                   \
        b,a,pt  %xcc, fill_fixup_mna;                   \
        b,a,pt  %xcc, fill_fixup;
 
 #define FILL_2_GENERIC_RTRAP                           \
 user_rtt_fill_32bit:                                   \
-       srl     %sp, 0, %sp;                            \
+       and     %sp, 1, %g3;                            \
+       brnz,pn %g3, user_rtt_fill_64bit;               \
+        srl    %sp, 0, %sp;                            \
        lduwa   [%sp + 0x00] %asi, %l0;                 \
        lduwa   [%sp + 0x04] %asi, %l1;                 \
        lduwa   [%sp + 0x08] %asi, %l2;                 \
@@ -643,7 +651,7 @@ user_rtt_fill_32bit:                                        \
        ba,pt   %xcc, user_rtt_pre_restore;             \
         restored;                                      \
        nop; nop; nop; nop; nop;                        \
-       nop; nop; nop; nop; nop;                        \
+       nop; nop; nop;                                  \
        ba,a,pt %xcc, user_rtt_fill_fixup;              \
        ba,a,pt %xcc, user_rtt_fill_fixup;              \
        ba,a,pt %xcc, user_rtt_fill_fixup;
index 8974ef7..cac719d 100644 (file)
 #define __NR_setns             337
 #define __NR_process_vm_readv  338
 #define __NR_process_vm_writev 339
+#define __NR_kern_features     340
+#define __NR_kcmp              341
 
-#define NR_syscalls            340
+#define NR_syscalls            342
+
+/* Bitmask values returned from kern_features system call.  */
+#define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
 
 #ifdef __32bit_syscall_numbers__
 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
index 0c218e4..cc3c5cb 100644 (file)
@@ -59,6 +59,13 @@ struct popc_6insn_patch_entry {
 extern struct popc_6insn_patch_entry __popc_6insn_patch,
        __popc_6insn_patch_end;
 
+struct pause_patch_entry {
+       unsigned int    addr;
+       unsigned int    insns[3];
+};
+extern struct pause_patch_entry __pause_3insn_patch,
+       __pause_3insn_patch_end;
+
 extern void __init per_cpu_patch(void);
 extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
                                    struct sun4v_1insn_patch_entry *);
index f8b6eee..87f60ee 100644 (file)
@@ -56,11 +56,13 @@ static inline unsigned int leon_eirq_get(int cpu)
 static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
 {
        unsigned int eirq;
+       struct irq_bucket *p;
        int cpu = sparc_leon3_cpuid();
 
        eirq = leon_eirq_get(cpu);
-       if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */
-               generic_handle_irq(irq_map[eirq]->irq);
+       p = irq_map[eirq];
+       if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */
+               generic_handle_irq(p->irq);
 }
 
 /* The extended IRQ controller has been found, this function registers it */
index 885a8af..b5c38fa 100644 (file)
@@ -1762,15 +1762,25 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 
        ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
        do {
-               struct sparc_stackf32 *usf, sf;
                unsigned long pc;
 
-               usf = (struct sparc_stackf32 *) ufp;
-               if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
-                       break;
+               if (thread32_stack_is_64bit(ufp)) {
+                       struct sparc_stackf *usf, sf;
 
-               pc = sf.callers_pc;
-               ufp = (unsigned long)sf.fp;
+                       ufp += STACK_BIAS;
+                       usf = (struct sparc_stackf *) ufp;
+                       if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
+                               break;
+                       pc = sf.callers_pc & 0xffffffff;
+                       ufp = ((unsigned long) sf.fp) & 0xffffffff;
+               } else {
+                       struct sparc_stackf32 *usf, sf;
+                       usf = (struct sparc_stackf32 *) ufp;
+                       if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
+                               break;
+                       pc = sf.callers_pc;
+                       ufp = (unsigned long)sf.fp;
+               }
                perf_callchain_store(entry, pc);
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
index d778248..c6e0c29 100644 (file)
@@ -452,13 +452,16 @@ void flush_thread(void)
 /* It's a bit more tricky when 64-bit tasks are involved... */
 static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
 {
+       bool stack_64bit = test_thread_64bit_stack(psp);
        unsigned long fp, distance, rval;
 
-       if (!(test_thread_flag(TIF_32BIT))) {
+       if (stack_64bit) {
                csp += STACK_BIAS;
                psp += STACK_BIAS;
                __get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
                fp += STACK_BIAS;
+               if (test_thread_flag(TIF_32BIT))
+                       fp &= 0xffffffff;
        } else
                __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
 
@@ -472,7 +475,7 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
        rval = (csp - distance);
        if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
                rval = 0;
-       else if (test_thread_flag(TIF_32BIT)) {
+       else if (!stack_64bit) {
                if (put_user(((u32)csp),
                             &(((struct reg_window32 __user *)rval)->ins[6])))
                        rval = 0;
@@ -507,18 +510,18 @@ void synchronize_user_stack(void)
 
        flush_user_windows();
        if ((window = get_thread_wsaved()) != 0) {
-               int winsize = sizeof(struct reg_window);
-               int bias = 0;
-
-               if (test_thread_flag(TIF_32BIT))
-                       winsize = sizeof(struct reg_window32);
-               else
-                       bias = STACK_BIAS;
-
                window -= 1;
                do {
-                       unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
                        struct reg_window *rwin = &t->reg_window[window];
+                       int winsize = sizeof(struct reg_window);
+                       unsigned long sp;
+
+                       sp = t->rwbuf_stkptrs[window];
+
+                       if (test_thread_64bit_stack(sp))
+                               sp += STACK_BIAS;
+                       else
+                               winsize = sizeof(struct reg_window32);
 
                        if (!copy_to_user((char __user *)sp, rwin, winsize)) {
                                shift_window_buffer(window, get_thread_wsaved() - 1, t);
@@ -544,13 +547,6 @@ void fault_in_user_windows(void)
 {
        struct thread_info *t = current_thread_info();
        unsigned long window;
-       int winsize = sizeof(struct reg_window);
-       int bias = 0;
-
-       if (test_thread_flag(TIF_32BIT))
-               winsize = sizeof(struct reg_window32);
-       else
-               bias = STACK_BIAS;
 
        flush_user_windows();
        window = get_thread_wsaved();
@@ -558,8 +554,16 @@ void fault_in_user_windows(void)
        if (likely(window != 0)) {
                window -= 1;
                do {
-                       unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
                        struct reg_window *rwin = &t->reg_window[window];
+                       int winsize = sizeof(struct reg_window);
+                       unsigned long sp;
+
+                       sp = t->rwbuf_stkptrs[window];
+
+                       if (test_thread_64bit_stack(sp))
+                               sp += STACK_BIAS;
+                       else
+                               winsize = sizeof(struct reg_window32);
 
                        if (unlikely(sp & 0x7UL))
                                stack_unaligned(sp);
index 484daba..7ff45e4 100644 (file)
@@ -151,7 +151,7 @@ static int regwindow64_get(struct task_struct *target,
 {
        unsigned long rw_addr = regs->u_regs[UREG_I6];
 
-       if (test_tsk_thread_flag(current, TIF_32BIT)) {
+       if (!test_thread_64bit_stack(rw_addr)) {
                struct reg_window32 win32;
                int i;
 
@@ -176,7 +176,7 @@ static int regwindow64_set(struct task_struct *target,
 {
        unsigned long rw_addr = regs->u_regs[UREG_I6];
 
-       if (test_tsk_thread_flag(current, TIF_32BIT)) {
+       if (!test_thread_64bit_stack(rw_addr)) {
                struct reg_window32 win32;
                int i;
 
index 0800e71..0eaf005 100644 (file)
@@ -316,6 +316,25 @@ static void __init popc_patch(void)
        }
 }
 
+static void __init pause_patch(void)
+{
+       struct pause_patch_entry *p;
+
+       p = &__pause_3insn_patch;
+       while (p < &__pause_3insn_patch_end) {
+               unsigned long i, addr = p->addr;
+
+               for (i = 0; i < 3; i++) {
+                       *(unsigned int *) (addr +  (i * 4)) = p->insns[i];
+                       wmb();
+                       __asm__ __volatile__("flush     %0"
+                                            : : "r" (addr +  (i * 4)));
+               }
+
+               p++;
+       }
+}
+
 #ifdef CONFIG_SMP
 void __init boot_cpu_id_too_large(int cpu)
 {
@@ -528,6 +547,8 @@ static void __init init_sparc64_elf_hwcap(void)
 
        if (sparc64_elf_hwcap & AV_SPARC_POPC)
                popc_patch();
+       if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
+               pause_patch();
 }
 
 void __init setup_arch(char **cmdline_p)
index 867de2f..689e1ba 100644 (file)
@@ -295,9 +295,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
                err |= restore_fpu_state(regs, fpu_save);
 
        err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
-       err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
-
-       if (err)
+       if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
                goto segv;
 
        err |= __get_user(rwin_save, &sf->rwin_save);
index 11c6c96..878ef3d 100644 (file)
@@ -751,3 +751,8 @@ int kernel_execve(const char *filename,
                      : "cc");
        return __res;
 }
+
+asmlinkage long sys_kern_features(void)
+{
+       return KERN_FEATURE_MIXED_MODE_STACK;
+}
index 63402f9..5147f57 100644 (file)
@@ -85,3 +85,4 @@ sys_call_table:
 /*325*/        .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
 /*330*/        .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/        .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
+/*340*/        .long sys_ni_syscall, sys_kcmp
index 3a58e0d..1c9af9f 100644 (file)
@@ -86,6 +86,7 @@ sys_call_table32:
        .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
 /*330*/        .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
        .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
+/*340*/        .word sys_kern_features, sys_kcmp
 
 #endif /* CONFIG_COMPAT */
 
@@ -163,3 +164,4 @@ sys_call_table:
        .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
 /*330*/        .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
        .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
+/*340*/        .word sys_kern_features, sys_kcmp
index f81d038..8201c25 100644 (file)
@@ -113,21 +113,24 @@ static inline long sign_extend_imm13(long imm)
 
 static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
 {
-       unsigned long value;
+       unsigned long value, fp;
        
        if (reg < 16)
                return (!reg ? 0 : regs->u_regs[reg]);
+
+       fp = regs->u_regs[UREG_FP];
+
        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *win;
-               win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+               win = (struct reg_window *)(fp + STACK_BIAS);
                value = win->locals[reg - 16];
-       } else if (test_thread_flag(TIF_32BIT)) {
+       } else if (!test_thread_64bit_stack(fp)) {
                struct reg_window32 __user *win32;
-               win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+               win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
                get_user(value, &win32->locals[reg - 16]);
        } else {
                struct reg_window __user *win;
-               win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+               win = (struct reg_window __user *)(fp + STACK_BIAS);
                get_user(value, &win->locals[reg - 16]);
        }
        return value;
@@ -135,19 +138,24 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
 
 static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
 {
+       unsigned long fp;
+
        if (reg < 16)
                return &regs->u_regs[reg];
+
+       fp = regs->u_regs[UREG_FP];
+
        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *win;
-               win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+               win = (struct reg_window *)(fp + STACK_BIAS);
                return &win->locals[reg - 16];
-       } else if (test_thread_flag(TIF_32BIT)) {
+       } else if (!test_thread_64bit_stack(fp)) {
                struct reg_window32 *win32;
-               win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+               win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
                return (unsigned long *)&win32->locals[reg - 16];
        } else {
                struct reg_window *win;
-               win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+               win = (struct reg_window *)(fp + STACK_BIAS);
                return &win->locals[reg - 16];
        }
 }
@@ -392,13 +400,15 @@ int handle_popc(u32 insn, struct pt_regs *regs)
                if (rd)
                        regs->u_regs[rd] = ret;
        } else {
-               if (test_thread_flag(TIF_32BIT)) {
+               unsigned long fp = regs->u_regs[UREG_FP];
+
+               if (!test_thread_64bit_stack(fp)) {
                        struct reg_window32 __user *win32;
-                       win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+                       win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
                        put_user(ret, &win32->locals[rd - 16]);
                } else {
                        struct reg_window __user *win;
-                       win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+                       win = (struct reg_window __user *)(fp + STACK_BIAS);
                        put_user(ret, &win->locals[rd - 16]);
                }
        }
@@ -554,7 +564,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
                reg[0] = 0;
                if ((insn & 0x780000) == 0x180000)
                        reg[1] = 0;
-       } else if (test_thread_flag(TIF_32BIT)) {
+       } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
                put_user(0, (int __user *) reg);
                if ((insn & 0x780000) == 0x180000)
                        put_user(0, ((int __user *) reg) + 1);
index 08e074b..c096c62 100644 (file)
@@ -149,21 +149,24 @@ static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
 
 static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
 {
-       unsigned long value;
+       unsigned long value, fp;
        
        if (reg < 16)
                return (!reg ? 0 : regs->u_regs[reg]);
+
+       fp = regs->u_regs[UREG_FP];
+
        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *win;
-               win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+               win = (struct reg_window *)(fp + STACK_BIAS);
                value = win->locals[reg - 16];
-       } else if (test_thread_flag(TIF_32BIT)) {
+       } else if (!test_thread_64bit_stack(fp)) {
                struct reg_window32 __user *win32;
-               win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+               win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
                get_user(value, &win32->locals[reg - 16]);
        } else {
                struct reg_window __user *win;
-               win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+               win = (struct reg_window __user *)(fp + STACK_BIAS);
                get_user(value, &win->locals[reg - 16]);
        }
        return value;
@@ -172,16 +175,18 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
 static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg,
                                                          struct pt_regs *regs)
 {
+       unsigned long fp = regs->u_regs[UREG_FP];
+
        BUG_ON(reg < 16);
        BUG_ON(regs->tstate & TSTATE_PRIV);
 
-       if (test_thread_flag(TIF_32BIT)) {
+       if (!test_thread_64bit_stack(fp)) {
                struct reg_window32 __user *win32;
-               win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+               win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
                return (unsigned long __user *)&win32->locals[reg - 16];
        } else {
                struct reg_window __user *win;
-               win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+               win = (struct reg_window __user *)(fp + STACK_BIAS);
                return &win->locals[reg - 16];
        }
 }
@@ -204,7 +209,7 @@ static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd)
        } else {
                unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs);
 
-               if (test_thread_flag(TIF_32BIT))
+               if (!test_thread_64bit_stack(regs->u_regs[UREG_FP]))
                        __put_user((u32)val, (u32 __user *)rd_user);
                else
                        __put_user(val, rd_user);
index 89c2c29..0bacceb 100644 (file)
@@ -132,6 +132,11 @@ SECTIONS
                *(.popc_6insn_patch)
                __popc_6insn_patch_end = .;
        }
+       .pause_3insn_patch : {
+               __pause_3insn_patch = .;
+               *(.pause_3insn_patch)
+               __pause_3insn_patch_end = .;
+       }
        PERCPU_SECTION(SMP_CACHE_BYTES)
 
        . = ALIGN(PAGE_SIZE);
index a6b0863..1e67ce9 100644 (file)
@@ -43,6 +43,8 @@ spill_fixup_mna:
 spill_fixup_dax:
        TRAP_LOAD_THREAD_REG(%g6, %g1)
        ldx     [%g6 + TI_FLAGS], %g1
+       andcc   %sp, 0x1, %g0
+       movne   %icc, 0, %g1
        andcc   %g1, _TIF_32BIT, %g0
        ldub    [%g6 + TI_WSAVED], %g1
        sll     %g1, 3, %g3
index 4d502da..85c233d 100644 (file)
@@ -1,6 +1,6 @@
 /* atomic.S: These things are too big to do inline.
  *
- * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1999, 2007, 2012 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/linkage.h>
@@ -117,3 +117,17 @@ ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
         sub    %g1, %o0, %o0
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
 ENDPROC(atomic64_sub_ret)
+
+ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+       BACKOFF_SETUP(%o2)
+1:     ldx     [%o0], %g1
+       brlez,pn %g1, 3f
+        sub    %g1, 1, %g7
+       casx    [%o0], %g1, %g7
+       cmp     %g1, %g7
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
+        nop
+3:     retl
+        sub    %g1, 1, %o0
+2:     BACKOFF_SPIN(%o2, %o3, 1b)
+ENDPROC(atomic64_dec_if_positive)
index ee31b88..0c4e35e 100644 (file)
@@ -116,6 +116,7 @@ EXPORT_SYMBOL(atomic64_add);
 EXPORT_SYMBOL(atomic64_add_ret);
 EXPORT_SYMBOL(atomic64_sub);
 EXPORT_SYMBOL(atomic64_sub_ret);
+EXPORT_SYMBOL(atomic64_dec_if_positive);
 
 /* Atomic bit operations. */
 EXPORT_SYMBOL(test_and_set_bit);
index 1704068..034aadb 100644 (file)
@@ -320,7 +320,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap)
                                        XR = 0;
                                else if (freg < 16)
                                        XR = regs->u_regs[freg];
-                               else if (test_thread_flag(TIF_32BIT)) {
+                               else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
                                        struct reg_window32 __user *win32;
                                        flushw_user ();
                                        win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
index e5c5473..c4fbb21 100644 (file)
@@ -16,6 +16,8 @@ config UNICORE32
        select ARCH_WANT_FRAME_POINTERS
        select GENERIC_IOMAP
        select MODULES_USE_ELF_REL
+       select GENERIC_KERNEL_THREAD
+       select GENERIC_KERNEL_EXECVE
        help
          UniCore-32 is 32-bit Instruction Set Architecture,
          including a series of low-power-consumption RISC chip
@@ -64,6 +66,9 @@ config GENERIC_CALIBRATE_DELAY
 config ARCH_MAY_HAVE_PC_FDC
        bool
 
+config ZONE_DMA
+       def_bool y
+
 config NEED_DMA_MAP_STATE
        def_bool y
 
@@ -216,7 +221,7 @@ config PUV3_GPIO
        bool
        depends on !ARCH_FPGA
        select GENERIC_GPIO
-       select GPIO_SYSFS if EXPERIMENTAL
+       select GPIO_SYSFS
        default y
 
 if PUV3_NB0916
index c910c98..601e92f 100644 (file)
@@ -1,4 +1,3 @@
-include include/asm-generic/Kbuild.asm
 
 generic-y += atomic.h
 generic-y += auxvec.h
index b1ff8ca..93a56f3 100644 (file)
@@ -19,9 +19,4 @@ extern void die(const char *msg, struct pt_regs *regs, int err);
 extern void uc32_notify_die(const char *str, struct pt_regs *regs,
                struct siginfo *info, unsigned long err, unsigned long trap);
 
-extern asmlinkage void __backtrace(void);
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
-
-extern void __show_regs(struct pt_regs *);
-
 #endif /* __UNICORE_BUG_H__ */
index df4d5ac..8e797ad 100644 (file)
@@ -35,7 +35,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
                        : "memory", "cc");
                break;
        default:
-               ret = __xchg_bad_pointer();
+               __xchg_bad_pointer();
        }
 
        return ret;
diff --git a/arch/unicore32/include/asm/kvm_para.h b/arch/unicore32/include/asm/kvm_para.h
deleted file mode 100644 (file)
index 14fab8f..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
index 14382cb..4eaa421 100644 (file)
@@ -72,11 +72,6 @@ unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax()                    barrier()
 
-/*
- * Create a new kernel thread
- */
-extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
 #define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 
index b9caf9b..726749d 100644 (file)
 #ifndef __UNICORE_PTRACE_H__
 #define __UNICORE_PTRACE_H__
 
-#define PTRACE_GET_THREAD_AREA 22
-
-/*
- * PSR bits
- */
-#define USER_MODE      0x00000010
-#define REAL_MODE      0x00000011
-#define INTR_MODE      0x00000012
-#define PRIV_MODE      0x00000013
-#define ABRT_MODE      0x00000017
-#define EXTN_MODE      0x0000001b
-#define SUSR_MODE      0x0000001f
-#define MODE_MASK      0x0000001f
-#define PSR_R_BIT      0x00000040
-#define PSR_I_BIT      0x00000080
-#define PSR_V_BIT      0x10000000
-#define PSR_C_BIT      0x20000000
-#define PSR_Z_BIT      0x40000000
-#define PSR_S_BIT      0x80000000
-
-/*
- * Groups of PSR bits
- */
-#define PSR_f          0xff000000      /* Flags                */
-#define PSR_c          0x000000ff      /* Control              */
+#include <uapi/asm/ptrace.h>
 
 #ifndef __ASSEMBLY__
 
-/*
- * This struct defines the way the registers are stored on the
- * stack during a system call.  Note that sizeof(struct pt_regs)
- * has to be a multiple of 8.
- */
-struct pt_regs {
-       unsigned long uregs[34];
-};
-
-#define UCreg_asr              uregs[32]
-#define UCreg_pc               uregs[31]
-#define UCreg_lr               uregs[30]
-#define UCreg_sp               uregs[29]
-#define UCreg_ip               uregs[28]
-#define UCreg_fp               uregs[27]
-#define UCreg_26               uregs[26]
-#define UCreg_25               uregs[25]
-#define UCreg_24               uregs[24]
-#define UCreg_23               uregs[23]
-#define UCreg_22               uregs[22]
-#define UCreg_21               uregs[21]
-#define UCreg_20               uregs[20]
-#define UCreg_19               uregs[19]
-#define UCreg_18               uregs[18]
-#define UCreg_17               uregs[17]
-#define UCreg_16               uregs[16]
-#define UCreg_15               uregs[15]
-#define UCreg_14               uregs[14]
-#define UCreg_13               uregs[13]
-#define UCreg_12               uregs[12]
-#define UCreg_11               uregs[11]
-#define UCreg_10               uregs[10]
-#define UCreg_09               uregs[9]
-#define UCreg_08               uregs[8]
-#define UCreg_07               uregs[7]
-#define UCreg_06               uregs[6]
-#define UCreg_05               uregs[5]
-#define UCreg_04               uregs[4]
-#define UCreg_03               uregs[3]
-#define UCreg_02               uregs[2]
-#define UCreg_01               uregs[1]
-#define UCreg_00               uregs[0]
-#define UCreg_ORIG_00          uregs[33]
-
-#ifdef __KERNEL__
-
 #define user_mode(regs)        \
        (processor_mode(regs) == USER_MODE)
 
@@ -125,9 +55,5 @@ static inline int valid_user_regs(struct pt_regs *regs)
 
 #define instruction_pointer(regs)      ((regs)->UCreg_pc)
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASSEMBLY__ */
-
 #endif
-
index baebb3d..0514d7a 100644 (file)
@@ -1,3 +1,10 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+header-y += byteorder.h
+header-y += kvm_para.h
+header-y += ptrace.h
+header-y += sigcontext.h
+header-y += unistd.h
+
+generic-y += kvm_para.h
diff --git a/arch/unicore32/include/uapi/asm/ptrace.h b/arch/unicore32/include/uapi/asm/ptrace.h
new file mode 100644 (file)
index 0000000..187aa2e
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * linux/arch/unicore32/include/asm/ptrace.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _UAPI__UNICORE_PTRACE_H__
+#define _UAPI__UNICORE_PTRACE_H__
+
+#define PTRACE_GET_THREAD_AREA 22
+
+/*
+ * PSR bits
+ */
+#define USER_MODE      0x00000010
+#define REAL_MODE      0x00000011
+#define INTR_MODE      0x00000012
+#define PRIV_MODE      0x00000013
+#define ABRT_MODE      0x00000017
+#define EXTN_MODE      0x0000001b
+#define SUSR_MODE      0x0000001f
+#define MODE_MASK      0x0000001f
+#define PSR_R_BIT      0x00000040
+#define PSR_I_BIT      0x00000080
+#define PSR_V_BIT      0x10000000
+#define PSR_C_BIT      0x20000000
+#define PSR_Z_BIT      0x40000000
+#define PSR_S_BIT      0x80000000
+
+/*
+ * Groups of PSR bits
+ */
+#define PSR_f          0xff000000      /* Flags                */
+#define PSR_c          0x000000ff      /* Control              */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are stored on the
+ * stack during a system call.  Note that sizeof(struct pt_regs)
+ * has to be a multiple of 8.
+ */
+struct pt_regs {
+       unsigned long uregs[34];
+};
+
+#define UCreg_asr              uregs[32]
+#define UCreg_pc               uregs[31]
+#define UCreg_lr               uregs[30]
+#define UCreg_sp               uregs[29]
+#define UCreg_ip               uregs[28]
+#define UCreg_fp               uregs[27]
+#define UCreg_26               uregs[26]
+#define UCreg_25               uregs[25]
+#define UCreg_24               uregs[24]
+#define UCreg_23               uregs[23]
+#define UCreg_22               uregs[22]
+#define UCreg_21               uregs[21]
+#define UCreg_20               uregs[20]
+#define UCreg_19               uregs[19]
+#define UCreg_18               uregs[18]
+#define UCreg_17               uregs[17]
+#define UCreg_16               uregs[16]
+#define UCreg_15               uregs[15]
+#define UCreg_14               uregs[14]
+#define UCreg_13               uregs[13]
+#define UCreg_12               uregs[12]
+#define UCreg_11               uregs[11]
+#define UCreg_10               uregs[10]
+#define UCreg_09               uregs[9]
+#define UCreg_08               uregs[8]
+#define UCreg_07               uregs[7]
+#define UCreg_06               uregs[6]
+#define UCreg_05               uregs[5]
+#define UCreg_04               uregs[4]
+#define UCreg_03               uregs[3]
+#define UCreg_02               uregs[2]
+#define UCreg_01               uregs[1]
+#define UCreg_00               uregs[0]
+#define UCreg_ORIG_00          uregs[33]
+
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI__UNICORE_PTRACE_H__ */
similarity index 92%
rename from arch/unicore32/include/asm/unistd.h
rename to arch/unicore32/include/uapi/asm/unistd.h
index 2abcf61..d18a3be 100644 (file)
@@ -12,3 +12,4 @@
 
 /* Use the standard ABI for syscalls. */
 #include <asm-generic/unistd.h>
+#define __ARCH_WANT_SYS_EXECVE
index dcb87ab..7049350 100644 (file)
@@ -573,17 +573,16 @@ ENDPROC(ret_to_user)
  */
 ENTRY(ret_from_fork)
        b.l     schedule_tail
-       get_thread_info tsk
-       ldw     r1, [tsk+], #TI_FLAGS           @ check for syscall tracing
-       mov     why, #1
-       cand.a  r1, #_TIF_SYSCALL_TRACE         @ are we tracing syscalls?
-       beq     ret_slow_syscall
-       mov     r1, sp
-       mov     r0, #1                          @ trace exit [IP = 1]
-       b.l     syscall_trace
        b       ret_slow_syscall
 ENDPROC(ret_from_fork)
 
+ENTRY(ret_from_kernel_thread)
+       b.l     schedule_tail
+       mov     r0, r5
+       adr     lr, ret_slow_syscall
+       mov     pc, r4
+ENDPROC(ret_from_kernel_thread)
+
 /*=============================================================================
  * SWI handler
  *-----------------------------------------------------------------------------
@@ -669,11 +668,6 @@ __cr_alignment:
 #endif
        .ltorg
 
-ENTRY(sys_execve)
-               add     r3, sp, #S_OFF
-               b       __sys_execve
-ENDPROC(sys_execve)
-
 ENTRY(sys_clone)
                add     ip, sp, #S_OFF
                stw     ip, [sp+], #4
index b008586..a8fe265 100644 (file)
@@ -258,6 +258,7 @@ void release_thread(struct task_struct *dead_task)
 }
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
 
 int
 copy_thread(unsigned long clone_flags, unsigned long stack_start,
@@ -266,17 +267,22 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
        struct thread_info *thread = task_thread_info(p);
        struct pt_regs *childregs = task_pt_regs(p);
 
-       *childregs = *regs;
-       childregs->UCreg_00 = 0;
-       childregs->UCreg_sp = stack_start;
-
        memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
        thread->cpu_context.sp = (unsigned long)childregs;
-       thread->cpu_context.pc = (unsigned long)ret_from_fork;
-
-       if (clone_flags & CLONE_SETTLS)
-               childregs->UCreg_16 = regs->UCreg_03;
+       if (unlikely(!regs)) {
+               thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread;
+               thread->cpu_context.r4 = stack_start;
+               thread->cpu_context.r5 = stk_sz;
+               memset(childregs, 0, sizeof(struct pt_regs));
+       } else {
+               thread->cpu_context.pc = (unsigned long)ret_from_fork;
+               *childregs = *regs;
+               childregs->UCreg_00 = 0;
+               childregs->UCreg_sp = stack_start;
 
+               if (clone_flags & CLONE_SETTLS)
+                       childregs->UCreg_16 = regs->UCreg_03;
+       }
        return 0;
 }
 
@@ -305,42 +311,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fp)
 }
 EXPORT_SYMBOL(dump_fpu);
 
-/*
- * Shuffle the argument into the correct register before calling the
- * thread function.  r1 is the thread argument, r2 is the pointer to
- * the thread function, and r3 points to the exit function.
- */
-asm(".pushsection .text\n"
-"      .align\n"
-"      .type   kernel_thread_helper, #function\n"
-"kernel_thread_helper:\n"
-"      mov.a   asr, r7\n"
-"      mov     r0, r4\n"
-"      mov     lr, r6\n"
-"      mov     pc, r5\n"
-"      .size   kernel_thread_helper, . - kernel_thread_helper\n"
-"      .popsection");
-
-/*
- * Create a kernel thread.
- */
-pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
-       struct pt_regs regs;
-
-       memset(&regs, 0, sizeof(regs));
-
-       regs.UCreg_04 = (unsigned long)arg;
-       regs.UCreg_05 = (unsigned long)fn;
-       regs.UCreg_06 = (unsigned long)do_exit;
-       regs.UCreg_07 = PRIV_MODE;
-       regs.UCreg_pc = (unsigned long)kernel_thread_helper;
-       regs.UCreg_asr = regs.UCreg_07 | PSR_I_BIT;
-
-       return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
 unsigned long get_wchan(struct task_struct *p)
 {
        struct stackframe frame;
index f239550..30f749d 100644 (file)
@@ -30,4 +30,10 @@ extern char __vectors_start[], __vectors_end[];
 extern void kernel_thread_helper(void);
 
 extern void __init early_signal_init(void);
+
+extern asmlinkage void __backtrace(void);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+extern void __show_regs(struct pt_regs *);
+
 #endif
index fabdee9..9680134 100644 (file)
@@ -42,69 +42,6 @@ asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp,
                        parent_tid, child_tid);
 }
 
-/* sys_execve() executes a new program.
- * This is called indirectly via a small wrapper
- */
-asmlinkage long __sys_execve(const char __user *filename,
-                         const char __user *const __user *argv,
-                         const char __user *const __user *envp,
-                         struct pt_regs *regs)
-{
-       int error;
-       struct filename *fn;
-
-       fn = getname(filename);
-       error = PTR_ERR(fn);
-       if (IS_ERR(fn))
-               goto out;
-       error = do_execve(fn->name, argv, envp, regs);
-       putname(fn);
-out:
-       return error;
-}
-
-int kernel_execve(const char *filename,
-                 const char *const argv[],
-                 const char *const envp[])
-{
-       struct pt_regs regs;
-       int ret;
-
-       memset(&regs, 0, sizeof(struct pt_regs));
-       ret = do_execve(filename,
-                       (const char __user *const __user *)argv,
-                       (const char __user *const __user *)envp, &regs);
-       if (ret < 0)
-               goto out;
-
-       /*
-        * Save argc to the register structure for userspace.
-        */
-       regs.UCreg_00 = ret;
-
-       /*
-        * We were successful.  We won't be returning to our caller, but
-        * instead to user space by manipulating the kernel stack.
-        */
-       asm("add        r0, %0, %1\n\t"
-               "mov    r1, %2\n\t"
-               "mov    r2, %3\n\t"
-               "mov    r22, #0\n\t"    /* not a syscall */
-               "mov    r23, %0\n\t"    /* thread structure */
-               "b.l    memmove\n\t"    /* copy regs to top of stack */
-               "mov    sp, r0\n\t"     /* reposition stack pointer */
-               "b      ret_to_user"
-               :
-               : "r" (current_thread_info()),
-                 "Ir" (THREAD_START_SP - sizeof(regs)),
-                 "r" (&regs),
-                 "Ir" (sizeof(regs))
-               : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
-
- out:
-       return ret;
-}
-
 /* Note: used by the compat code even in 64-bit Linux. */
 SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
index 2eeb9c0..f9b5c10 100644 (file)
@@ -168,7 +168,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 }
 
 static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-               struct task_struct *tsk)
+               unsigned int flags, struct task_struct *tsk)
 {
        struct vm_area_struct *vma;
        int fault;
@@ -194,14 +194,7 @@ good_area:
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the fault.
         */
-       fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
-                           (!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
-       if (unlikely(fault & VM_FAULT_ERROR))
-               return fault;
-       if (fault & VM_FAULT_MAJOR)
-               tsk->maj_flt++;
-       else
-               tsk->min_flt++;
+       fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
        return fault;
 
 check_stack:
@@ -216,6 +209,8 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                                ((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
 
        tsk = current;
        mm = tsk->mm;
@@ -236,6 +231,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                if (!user_mode(regs)
                    && !search_exception_tables(regs->UCreg_pc))
                        goto no_context;
+retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
@@ -251,7 +247,28 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
        }
 
-       fault = __do_pf(mm, addr, fsr, tsk);
+       fault = __do_pf(mm, addr, fsr, flags, tsk);
+
+       /* If we need to retry but a fatal signal is pending, handle the
+        * signal first. We do not need to release the mmap_sem because
+        * it would already be released in __lock_page_or_retry in
+        * mm/filemap.c. */
+       if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+               return 0;
+
+       if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
+               if (fault & VM_FAULT_MAJOR)
+                       tsk->maj_flt++;
+               else
+                       tsk->min_flt++;
+               if (fault & VM_FAULT_RETRY) {
+                       /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+                       * of starvation. */
+                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       goto retry;
+               }
+       }
+
        up_read(&mm->mmap_sem);
 
        /*
index c760e07..e87b0ca 100644 (file)
@@ -12,6 +12,8 @@
 #include <asm/setup.h>
 #include <asm/desc.h>
 
+#undef memcpy                  /* Use memcpy from misc.c */
+
 #include "eboot.h"
 
 static efi_system_table_t *sys_table;
index 2a01744..8c132a6 100644 (file)
@@ -476,6 +476,3 @@ die:
 setup_corrupt:
        .byte   7
        .string "No setup signature found...\n"
-
-       .data
-dummy: .long   0
index dcfde52..19f16eb 100644 (file)
@@ -205,21 +205,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
 }
 #endif
 
-/*
- * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps.  The previous stack will be directly underneath the saved
- * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
- *
- * This is valid only for kernel mode traps.
- */
-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
-{
 #ifdef CONFIG_X86_32
-       return (unsigned long)(&regs->sp);
+extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
 #else
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
        return regs->sp;
-#endif
 }
+#endif
 
 #define GET_IP(regs) ((regs)->ip)
 #define GET_FP(regs) ((regs)->bp)
index 59c226d..c20d1ce 100644 (file)
@@ -359,18 +359,14 @@ HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
                return _hypercall4(int, update_va_mapping, va,
                                   new_val.pte, new_val.pte >> 32, flags);
 }
+extern int __must_check xen_event_channel_op_compat(int, void *);
 
 static inline int
 HYPERVISOR_event_channel_op(int cmd, void *arg)
 {
        int rc = _hypercall2(int, event_channel_op, cmd, arg);
-       if (unlikely(rc == -ENOSYS)) {
-               struct evtchn_op op;
-               op.cmd = cmd;
-               memcpy(&op.u, arg, sizeof(op.u));
-               rc = _hypercall1(int, event_channel_op_compat, &op);
-               memcpy(arg, &op.u, sizeof(op.u));
-       }
+       if (unlikely(rc == -ENOSYS))
+               rc = xen_event_channel_op_compat(cmd, arg);
        return rc;
 }
 
@@ -386,17 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
        return _hypercall3(int, console_io, cmd, count, str);
 }
 
+extern int __must_check HYPERVISOR_physdev_op_compat(int, void *);
+
 static inline int
 HYPERVISOR_physdev_op(int cmd, void *arg)
 {
        int rc = _hypercall2(int, physdev_op, cmd, arg);
-       if (unlikely(rc == -ENOSYS)) {
-               struct physdev_op op;
-               op.cmd = cmd;
-               memcpy(&op.u, arg, sizeof(op.u));
-               rc = _hypercall1(int, physdev_op_compat, &op);
-               memcpy(arg, &op.u, sizeof(op.u));
-       }
+       if (unlikely(rc == -ENOSYS))
+               rc = HYPERVISOR_physdev_op_compat(cmd, arg);
        return rc;
 }
 
index f7e98a2..1b7d165 100644 (file)
@@ -631,6 +631,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                }
        }
 
+       /*
+        * The way access filter has a performance penalty on some workloads.
+        * Disable it on the affected CPUs.
+        */
+       if ((c->x86 == 0x15) &&
+           (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
+               u64 val;
+
+               if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
+                       val |= 0x1E;
+                       wrmsrl_safe(0xc0011021, val);
+               }
+       }
+
        cpu_detect_cache_sizes(c);
 
        /* Multi core CPU? */
index 698b6ec..1ac581f 100644 (file)
@@ -6,7 +6,7 @@
  *
  *  Written by Jacob Shin - AMD, Inc.
  *
- *  Support: borislav.petkov@amd.com
+ *  Maintained by: Borislav Petkov <bp@alien8.de>
  *
  *  April 2006
  *     - added support for AMD Family 0x10 processors
index 5f88abf..4f9a3cb 100644 (file)
@@ -285,34 +285,39 @@ void cmci_clear(void)
        raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
+static long cmci_rediscover_work_func(void *arg)
+{
+       int banks;
+
+       /* Recheck banks in case CPUs don't all have the same */
+       if (cmci_supported(&banks))
+               cmci_discover(banks);
+
+       return 0;
+}
+
 /*
  * After a CPU went down cycle through all the others and rediscover
  * Must run in process context.
  */
 void cmci_rediscover(int dying)
 {
-       int banks;
-       int cpu;
-       cpumask_var_t old;
+       int cpu, banks;
 
        if (!cmci_supported(&banks))
                return;
-       if (!alloc_cpumask_var(&old, GFP_KERNEL))
-               return;
-       cpumask_copy(old, &current->cpus_allowed);
 
        for_each_online_cpu(cpu) {
                if (cpu == dying)
                        continue;
-               if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
+
+               if (cpu == smp_processor_id()) {
+                       cmci_rediscover_work_func(NULL);
                        continue;
-               /* Recheck banks in case CPUs don't all have the same */
-               if (cmci_supported(&banks))
-                       cmci_discover(banks);
-       }
+               }
 
-       set_cpus_allowed_ptr(current, old);
-       free_cpumask_var(old);
+               work_on_cpu(cpu, cmci_rediscover_work_func, NULL);
+       }
 }
 
 /*
index b51b2c7..1328fe4 100644 (file)
@@ -995,8 +995,8 @@ END(interrupt)
         */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
-       ASM_CLAC
        XCPT_FRAME
+       ASM_CLAC
        addq $-0x80,(%rsp)              /* Adjust vector to [-256,-1] range */
        interrupt do_IRQ
        /* 0(%rsp): old_rsp-ARGOFFSET */
@@ -1135,8 +1135,8 @@ END(common_interrupt)
  */
 .macro apicinterrupt num sym do_sym
 ENTRY(\sym)
-       ASM_CLAC
        INTR_FRAME
+       ASM_CLAC
        pushq_cfi $~(\num)
 .Lcommon_\sym:
        interrupt \do_sym
@@ -1190,8 +1190,8 @@ apicinterrupt IRQ_WORK_VECTOR \
  */
 .macro zeroentry sym do_sym
 ENTRY(\sym)
-       ASM_CLAC
        INTR_FRAME
+       ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
@@ -1208,8 +1208,8 @@ END(\sym)
 
 .macro paranoidzeroentry sym do_sym
 ENTRY(\sym)
-       ASM_CLAC
        INTR_FRAME
+       ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
@@ -1227,8 +1227,8 @@ END(\sym)
 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
-       ASM_CLAC
        INTR_FRAME
+       ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
@@ -1247,8 +1247,8 @@ END(\sym)
 
 .macro errorentry sym do_sym
 ENTRY(\sym)
-       ASM_CLAC
        XCPT_FRAME
+       ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
@@ -1266,8 +1266,8 @@ END(\sym)
        /* error code is on the stack already */
 .macro paranoiderrorentry sym do_sym
 ENTRY(\sym)
-       ASM_CLAC
        XCPT_FRAME
+       ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
index 7720ff5..efdec7c 100644 (file)
@@ -8,8 +8,8 @@
  *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
  *
  *  Maintainers:
- *  Andreas Herrmann <andreas.herrmann3@amd.com>
- *  Borislav Petkov <borislav.petkov@amd.com>
+ *  Andreas Herrmann <herrmann.der.user@googlemail.com>
+ *  Borislav Petkov <bp@alien8.de>
  *
  *  This driver allows to upgrade microcode on F10h AMD
  *  CPUs and later.
@@ -190,6 +190,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
 #define F1XH_MPB_MAX_SIZE 2048
 #define F14H_MPB_MAX_SIZE 1824
 #define F15H_MPB_MAX_SIZE 4096
+#define F16H_MPB_MAX_SIZE 3458
 
        switch (c->x86) {
        case 0x14:
@@ -198,6 +199,9 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
        case 0x15:
                max_size = F15H_MPB_MAX_SIZE;
                break;
+       case 0x16:
+               max_size = F16H_MPB_MAX_SIZE;
+               break;
        default:
                max_size = F1XH_MPB_MAX_SIZE;
                break;
index b00b33a..5e0596b 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/rcupdate.h>
+#include <linux/module.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -166,6 +167,35 @@ static inline bool invalid_selector(u16 value)
 
 #define FLAG_MASK              FLAG_MASK_32
 
+/*
+ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+ * when it traps.  The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
+ *
+ * Now, if the stack is empty, '&regs->sp' is out of range. In this
+ * case we try to take the previous stack. To always return a non-null
+ * stack pointer we fall back to regs as stack if no previous stack
+ * exists.
+ *
+ * This is valid only for kernel mode traps.
+ */
+unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+       unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
+       unsigned long sp = (unsigned long)&regs->sp;
+       struct thread_info *tinfo;
+
+       if (context == (sp & ~(THREAD_SIZE - 1)))
+               return sp;
+
+       tinfo = (struct thread_info *)context;
+       if (tinfo->previous_esp)
+               return tinfo->previous_esp;
+
+       return (unsigned long)regs;
+}
+EXPORT_SYMBOL_GPL(kernel_stack_pointer);
+
 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
 {
        BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
index a10e460..58fc514 100644 (file)
@@ -24,6 +24,9 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
 
+       if (!static_cpu_has(X86_FEATURE_XSAVE))
+               return 0;
+
        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        return best && (best->ecx & bit(X86_FEATURE_XSAVE));
 }
index ad6b1dd..f858159 100644 (file)
@@ -6549,19 +6549,22 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
                }
        }
 
-       exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
        /* Exposing INVPCID only when PCID is exposed */
        best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
        if (vmx_invpcid_supported() &&
            best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
            guest_cpuid_has_pcid(vcpu)) {
+               exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
                exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
                vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
                             exec_control);
        } else {
-               exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
-               vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
-                            exec_control);
+               if (cpu_has_secondary_exec_ctrls()) {
+                       exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+                       exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
+                       vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+                                    exec_control);
+               }
                if (best)
                        best->ebx &= ~bit(X86_FEATURE_INVPCID);
        }
index 224a7e7..4f76417 100644 (file)
@@ -5781,6 +5781,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        int pending_vec, max_bits, idx;
        struct desc_ptr dt;
 
+       if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
+               return -EINVAL;
+
        dt.size = sregs->idt.limit;
        dt.address = sregs->idt.base;
        kvm_x86_ops->set_idt(vcpu, &dt);
index 0777f04..60f926c 100644 (file)
@@ -197,7 +197,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
        }
 
        if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
-                                       || vmflag == VM_HUGETLB) {
+                                       || vmflag & VM_HUGETLB) {
                local_flush_tlb();
                goto flush_all;
        }
index 41bd2a2..b914e20 100644 (file)
@@ -115,6 +115,16 @@ static void sata_revid_read(struct sim_dev_reg *reg, u32 *value)
        reg_read(reg, value);
 }
 
+static void reg_noirq_read(struct sim_dev_reg *reg, u32 *value)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&pci_config_lock, flags);
+       /* force interrupt pin value to 0 */
+       *value = reg->sim_reg.value & 0xfff00ff;
+       raw_spin_unlock_irqrestore(&pci_config_lock, flags);
+}
+
 static struct sim_dev_reg bus1_fixups[] = {
        DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write)
        DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write)
@@ -144,6 +154,7 @@ static struct sim_dev_reg bus1_fixups[] = {
        DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write)
        DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write)
        DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write)
+       DEFINE_REG(11, 7, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
        DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
        DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write)
        DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write)
@@ -161,8 +172,10 @@ static struct sim_dev_reg bus1_fixups[] = {
        DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write)
        DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write)
        DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write)
+       DEFINE_REG(16, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
        DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
        DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write)
+       DEFINE_REG(18, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
 };
 
 static void __init init_sim_regs(void)
index 4c61b52..92525cb 100644 (file)
 #include <asm/i8259.h>
 #include <asm/io.h>
 #include <asm/io_apic.h>
+#include <asm/emergency-restart.h>
 
 static int ce4100_i8042_detect(void)
 {
        return 0;
 }
 
+/*
+ * The CE4100 platform has an internal 8051 Microcontroller which is
+ * responsible for signaling to the external Power Management Unit the
+ * intention to reset, reboot or power off the system. This 8051 device has
+ * its command register mapped at I/O port 0xcf9 and the value 0x4 is used
+ * to power off the system.
+ */
+static void ce4100_power_off(void)
+{
+       outb(0x4, 0xcf9);
+}
+
 #ifdef CONFIG_SERIAL_8250
 
 static unsigned int mem_serial_in(struct uart_port *p, int offset)
@@ -139,8 +152,19 @@ void __init x86_ce4100_early_setup(void)
        x86_init.mpparse.find_smp_config = x86_init_noop;
        x86_init.pci.init = ce4100_pci_init;
 
+       /*
+        * By default, the reboot method is ACPI which is supported by the
+        * CE4100 bootloader CEFDK using FADT.ResetReg Address and ResetValue
+        * the bootloader will however issue a system power off instead of
+        * reboot. By using BOOT_KBD we ensure proper system reboot as
+        * expected.
+        */
+       reboot_type = BOOT_KBD;
+
 #ifdef CONFIG_X86_IO_APIC
        x86_init.pci.init_irq = sdv_pci_init;
        x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck;
 #endif
+
+       pm_power_off = ce4100_power_off;
 }
index 8b6dc5b..f71eac3 100644 (file)
@@ -52,11 +52,17 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
                           rq_end_io_fn *done)
 {
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+       bool is_pm_resume;
 
        WARN_ON(irqs_disabled());
 
        rq->rq_disk = bd_disk;
        rq->end_io = done;
+       /*
+        * need to check this before __blk_run_queue(), because rq can
+        * be freed before that returns.
+        */
+       is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
 
        spin_lock_irq(q->queue_lock);
 
@@ -71,7 +77,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        __elv_add_request(q, rq, where);
        __blk_run_queue(q);
        /* the queue is stopped so it won't be run */
-       if (rq->cmd_type == REQ_TYPE_PM_RESUME)
+       if (is_pm_resume)
                q->request_fn(q);
        spin_unlock_irq(q->queue_lock);
 }
index 671d4d6..7bdd61b 100644 (file)
@@ -137,13 +137,18 @@ static void cryptd_queue_worker(struct work_struct *work)
        struct crypto_async_request *req, *backlog;
 
        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
-       /* Only handle one request at a time to avoid hogging crypto
-        * workqueue. preempt_disable/enable is used to prevent
-        * being preempted by cryptd_enqueue_request() */
+       /*
+        * Only handle one request at a time to avoid hogging crypto workqueue.
+        * preempt_disable/enable is used to prevent being preempted by
+        * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
+        * cryptd_enqueue_request() being accessed from software interrupts.
+        */
+       local_bh_disable();
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();
+       local_bh_enable();
 
        if (!req)
                return;
index b1ae480..b7078af 100644 (file)
@@ -238,7 +238,7 @@ static int __devexit ahci_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int ahci_suspend(struct device *dev)
 {
        struct ahci_platform_data *pdata = dev_get_platdata(dev);
index fd9ecf7..5b0ba3f 100644 (file)
@@ -1105,10 +1105,15 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev,
        struct acpi_device *acpi_dev;
        struct acpi_device_power_state *states;
 
-       if (ap->flags & ATA_FLAG_ACPI_SATA)
-               ata_dev = &ap->link.device[sdev->channel];
-       else
+       if (ap->flags & ATA_FLAG_ACPI_SATA) {
+               if (!sata_pmp_attached(ap))
+                       ata_dev = &ap->link.device[sdev->id];
+               else
+                       ata_dev = &ap->pmp_link[sdev->channel].device[sdev->id];
+       }
+       else {
                ata_dev = &ap->link.device[sdev->id];
+       }
 
        *handle = ata_dev_acpi_handle(ata_dev);
 
index 3cc7096..f46fbd3 100644 (file)
@@ -2942,6 +2942,10 @@ const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
 
        if (xfer_mode == t->mode)
                return t;
+
+       WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
+                       __func__, xfer_mode);
+
        return NULL;
 }
 
index e3bda07..a6df6a3 100644 (file)
@@ -1052,6 +1052,8 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
 {
        sdev->use_10_for_rw = 1;
        sdev->use_10_for_ms = 1;
+       sdev->no_report_opcodes = 1;
+       sdev->no_write_same = 1;
 
        /* Schedule policy is determined by ->qc_defer() callback and
         * it needs to see every deferred qc.  Set dev_blocked to 1 to
index 26201eb..371fd2c 100644 (file)
@@ -317,6 +317,12 @@ static int cf_init(struct arasan_cf_dev *acdev)
                return ret;
        }
 
+       ret = clk_set_rate(acdev->clk, 166000000);
+       if (ret) {
+               dev_warn(acdev->host->dev, "clock set rate failed");
+               return ret;
+       }
+
        spin_lock_irqsave(&acdev->host->lock, flags);
        /* configure CF interface clock */
        writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk :
@@ -908,7 +914,7 @@ static int __devexit arasan_cf_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int arasan_cf_suspend(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
index 0d7c4c2..400bf1c 100644 (file)
@@ -260,7 +260,7 @@ static const struct of_device_id ahci_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ahci_of_match);
 
-static int __init ahci_highbank_probe(struct platform_device *pdev)
+static int __devinit ahci_highbank_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct ahci_host_priv *hpriv;
@@ -378,7 +378,7 @@ static int __devexit ahci_highbank_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int ahci_highbank_suspend(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
index 44a4256..08608de 100644 (file)
@@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link,
        return 0;
 }
 
+static int k2_sata_softreset(struct ata_link *link,
+                            unsigned int *class, unsigned long deadline)
+{
+       u8 dmactl;
+       void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
+
+       dmactl = readb(mmio + ATA_DMA_CMD);
+
+       /* Clear the start bit */
+       if (dmactl & ATA_DMA_START) {
+               dmactl &= ~ATA_DMA_START;
+               writeb(dmactl, mmio + ATA_DMA_CMD);
+       }
+
+       return ata_sff_softreset(link, class, deadline);
+}
+
+static int k2_sata_hardreset(struct ata_link *link,
+                            unsigned int *class, unsigned long deadline)
+{
+       u8 dmactl;
+       void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
+
+       dmactl = readb(mmio + ATA_DMA_CMD);
+
+       /* Clear the start bit */
+       if (dmactl & ATA_DMA_START) {
+               dmactl &= ~ATA_DMA_START;
+               writeb(dmactl, mmio + ATA_DMA_CMD);
+       }
+
+       return sata_sff_hardreset(link, class, deadline);
+}
 
 static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 {
@@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = {
 
 static struct ata_port_operations k2_sata_ops = {
        .inherits               = &ata_bmdma_port_ops,
+       .softreset              = k2_sata_softreset,
+       .hardreset              = k2_sata_hardreset,
        .sff_tf_load            = k2_sata_tf_load,
        .sff_tf_read            = k2_sata_tf_read,
        .sff_check_status       = k2_stat_check_status,
index 8727e9c..72c776f 100644 (file)
@@ -83,9 +83,16 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
  */
 int platform_get_irq(struct platform_device *dev, unsigned int num)
 {
+#ifdef CONFIG_SPARC
+       /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
+       if (!dev || num >= dev->archdata.num_irqs)
+               return -ENXIO;
+       return dev->archdata.irqs[num];
+#else
        struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 
        return r ? r->start : -ENXIO;
+#endif
 }
 EXPORT_SYMBOL_GPL(platform_get_irq);
 
index 74a67e0..fbbd4ed 100644 (file)
@@ -451,7 +451,7 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
        if (ancestor)
                error = dev_pm_qos_add_request(ancestor, req, value);
 
-       if (error)
+       if (error < 0)
                req->dev = NULL;
 
        return error;
index 3804a0a..9fe4f18 100644 (file)
@@ -935,7 +935,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
 
        /* cf. http://lkml.org/lkml/2006/10/31/28 */
        if (!fastfail)
-               q->request_fn(q);
+               __blk_run_queue(q);
 }
 
 static void
index 1c49d71..2ddd64a 100644 (file)
@@ -4330,6 +4330,7 @@ out_unreg_region:
 out_unreg_blkdev:
        unregister_blkdev(FLOPPY_MAJOR, "fd");
 out_put_disk:
+       destroy_workqueue(floppy_wq);
        for (drive = 0; drive < N_DRIVE; drive++) {
                if (!disks[drive])
                        break;
@@ -4340,7 +4341,6 @@ out_put_disk:
                }
                put_disk(disks[drive]);
        }
-       destroy_workqueue(floppy_wq);
        return err;
 }
 
@@ -4555,6 +4555,8 @@ static void __exit floppy_module_exit(void)
        unregister_blkdev(FLOPPY_MAJOR, "fd");
        platform_driver_unregister(&floppy_driver);
 
+       destroy_workqueue(floppy_wq);
+
        for (drive = 0; drive < N_DRIVE; drive++) {
                del_timer_sync(&motor_off_timer[drive]);
 
@@ -4578,7 +4580,6 @@ static void __exit floppy_module_exit(void)
 
        cancel_delayed_work_sync(&fd_timeout);
        cancel_delayed_work_sync(&fd_timer);
-       destroy_workqueue(floppy_wq);
 
        if (atomic_read(&usage_count))
                floppy_release_irq_and_dma();
index adc6f36..9694dd9 100644 (file)
@@ -559,7 +559,7 @@ static void mtip_timeout_function(unsigned long int data)
        struct mtip_cmd *command;
        int tag, cmdto_cnt = 0;
        unsigned int bit, group;
-       unsigned int num_command_slots = port->dd->slot_groups * 32;
+       unsigned int num_command_slots;
        unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
 
        if (unlikely(!port))
@@ -572,6 +572,7 @@ static void mtip_timeout_function(unsigned long int data)
        }
        /* clear the tag accumulator */
        memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+       num_command_slots = port->dd->slot_groups * 32;
 
        for (tag = 0; tag < num_command_slots; tag++) {
                /*
@@ -2218,8 +2219,8 @@ static int exec_drive_taskfile(struct driver_data *dd,
                fis.device);
 
        /* check for erase mode support during secure erase.*/
-       if ((fis.command == ATA_CMD_SEC_ERASE_UNIT)
-                                       && (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
+       if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
+                                       (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
                erasemode = 1;
        }
 
@@ -2439,7 +2440,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
  * return value
  *     None
  */
-static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
+static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
                              int nsect, int nents, int tag, void *callback,
                              void *data, int dir)
 {
@@ -2447,6 +2448,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
        struct mtip_port *port = dd->port;
        struct mtip_cmd *command = &port->commands[tag];
        int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+       u64 start = sector;
 
        /* Map the scatter list for DMA access */
        nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
@@ -2465,8 +2467,12 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
        fis->opts        = 1 << 7;
        fis->command     =
                (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
-       *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
-       *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
+       fis->lba_low     = start & 0xFF;
+       fis->lba_mid     = (start >> 8) & 0xFF;
+       fis->lba_hi      = (start >> 16) & 0xFF;
+       fis->lba_low_ex  = (start >> 24) & 0xFF;
+       fis->lba_mid_ex  = (start >> 32) & 0xFF;
+       fis->lba_hi_ex   = (start >> 40) & 0xFF;
        fis->device      = 1 << 6;
        fis->features    = nsect & 0xFF;
        fis->features_ex = (nsect >> 8) & 0xFF;
index 5f4a917..b174264 100644 (file)
@@ -34,7 +34,7 @@
 #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET  0x48
 
 /* check for erase mode support during secure erase */
-#define MTIP_SEC_ERASE_MODE     0x3
+#define MTIP_SEC_ERASE_MODE     0x2
 
 /* # of times to retry timed out/failed IOs */
 #define MTIP_MAX_RETRIES       2
@@ -155,14 +155,14 @@ enum {
        MTIP_DDF_REBUILD_FAILED_BIT = 8,
 };
 
-__packed struct smart_attr{
+struct smart_attr {
        u8 attr_id;
        u16 flags;
        u8 cur;
        u8 worst;
        u32 data;
        u8 res[3];
-};
+} __packed;
 
 /* Register Frame Information Structure (FIS), host to device. */
 struct host_to_dev_fis {
index fc2de55..b00000e 100644 (file)
@@ -67,6 +67,7 @@ static struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x13d3, 0x3304) },
        { USB_DEVICE(0x0930, 0x0215) },
        { USB_DEVICE(0x0489, 0xE03D) },
+       { USB_DEVICE(0x0489, 0xE027) },
 
        /* Atheros AR9285 Malbec with sflash firmware */
        { USB_DEVICE(0x03F0, 0x311D) },
index debda27..ee82f2f 100644 (file)
@@ -124,6 +124,7 @@ static struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+       { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
 
        /* Atheros AR9285 Malbec with sflash firmware */
        { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
index ff63560..0c48b0e 100644 (file)
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/platform_data/omap_ocp2scp.h>
+
+/**
+ * _count_resources - count for the number of resources
+ * @res: struct resource *
+ *
+ * Count and return the number of resources populated for the device that is
+ * connected to ocp2scp.
+ */
+static unsigned _count_resources(struct resource *res)
+{
+       int cnt = 0;
+
+       while (res->start != res->end) {
+               cnt++;
+               res++;
+       }
+
+       return cnt;
+}
 
 static int ocp2scp_remove_devices(struct device *dev, void *c)
 {
@@ -34,20 +54,62 @@ static int ocp2scp_remove_devices(struct device *dev, void *c)
 
 static int __devinit omap_ocp2scp_probe(struct platform_device *pdev)
 {
-       int                     ret;
-       struct device_node      *np = pdev->dev.of_node;
+       int ret;
+       unsigned res_cnt, i;
+       struct device_node *np = pdev->dev.of_node;
+       struct platform_device *pdev_child;
+       struct omap_ocp2scp_platform_data *pdata = pdev->dev.platform_data;
+       struct omap_ocp2scp_dev *dev;
 
        if (np) {
                ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
                if (ret) {
-                       dev_err(&pdev->dev, "failed to add resources for ocp2scp child\n");
+                       dev_err(&pdev->dev,
+                           "failed to add resources for ocp2scp child\n");
                        goto err0;
                }
+       } else if (pdata) {
+               for (i = 0, dev = *pdata->devices; i < pdata->dev_cnt; i++,
+                   dev++) {
+                       res_cnt = _count_resources(dev->res);
+
+                       pdev_child = platform_device_alloc(dev->drv_name,
+                           PLATFORM_DEVID_AUTO);
+                       if (!pdev_child) {
+                               dev_err(&pdev->dev,
+                                 "failed to allocate mem for ocp2scp child\n");
+                               goto err0;
+                       }
+
+                       ret = platform_device_add_resources(pdev_child,
+                           dev->res, res_cnt);
+                       if (ret) {
+                               dev_err(&pdev->dev,
+                                "failed to add resources for ocp2scp child\n");
+                               goto err1;
+                       }
+
+                       pdev_child->dev.parent  = &pdev->dev;
+
+                       ret = platform_device_add(pdev_child);
+                       if (ret) {
+                               dev_err(&pdev->dev,
+                                  "failed to register ocp2scp child device\n");
+                               goto err1;
+                       }
+               }
+       } else {
+               dev_err(&pdev->dev, "OCP2SCP initialized without plat data\n");
+               return -EINVAL;
        }
+
        pm_runtime_enable(&pdev->dev);
 
        return 0;
 
+err1:
+       platform_device_put(pdev_child);
+
 err0:
        device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
 
index ca4a25e..e2c17d1 100644 (file)
@@ -40,7 +40,7 @@ void u8500_clk_init(void)
                                CLK_IS_ROOT|CLK_IGNORE_UNUSED,
                                32768);
        clk_register_clkdev(clk, "clk32k", NULL);
-       clk_register_clkdev(clk, NULL, "rtc-pl031");
+       clk_register_clkdev(clk, "apb_pclk", "rtc-pl031");
 
        /* PRCMU clocks */
        fw_version = prcmu_get_fw_version();
@@ -228,10 +228,17 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", U8500_CLKRST1_BASE,
                                BIT(2), 0);
+       clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
+
        clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", U8500_CLKRST1_BASE,
                                BIT(3), 0);
+       clk_register_clkdev(clk, "apb_pclk", "msp0");
+       clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0");
+
        clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", U8500_CLKRST1_BASE,
                                BIT(4), 0);
+       clk_register_clkdev(clk, "apb_pclk", "msp1");
+       clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1");
 
        clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", U8500_CLKRST1_BASE,
                                BIT(5), 0);
@@ -239,6 +246,7 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", U8500_CLKRST1_BASE,
                                BIT(6), 0);
+       clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
 
        clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", U8500_CLKRST1_BASE,
                                BIT(7), 0);
@@ -246,6 +254,7 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", U8500_CLKRST1_BASE,
                                BIT(8), 0);
+       clk_register_clkdev(clk, "apb_pclk", "slimbus0");
 
        clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", U8500_CLKRST1_BASE,
                                BIT(9), 0);
@@ -255,11 +264,16 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", U8500_CLKRST1_BASE,
                                BIT(10), 0);
+       clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
+
        clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", U8500_CLKRST1_BASE,
                                BIT(11), 0);
+       clk_register_clkdev(clk, "apb_pclk", "msp3");
+       clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3");
 
        clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", U8500_CLKRST2_BASE,
                                BIT(0), 0);
+       clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
 
        clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", U8500_CLKRST2_BASE,
                                BIT(1), 0);
@@ -279,12 +293,13 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", U8500_CLKRST2_BASE,
                                BIT(5), 0);
+       clk_register_clkdev(clk, "apb_pclk", "msp2");
+       clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2");
 
        clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", U8500_CLKRST2_BASE,
                                BIT(6), 0);
        clk_register_clkdev(clk, "apb_pclk", "sdi1");
 
-
        clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", U8500_CLKRST2_BASE,
                                BIT(7), 0);
        clk_register_clkdev(clk, "apb_pclk", "sdi3");
@@ -316,10 +331,15 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", U8500_CLKRST3_BASE,
                                BIT(1), 0);
+       clk_register_clkdev(clk, "apb_pclk", "ssp0");
+
        clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", U8500_CLKRST3_BASE,
                                BIT(2), 0);
+       clk_register_clkdev(clk, "apb_pclk", "ssp1");
+
        clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", U8500_CLKRST3_BASE,
                                BIT(3), 0);
+       clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
 
        clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", U8500_CLKRST3_BASE,
                                BIT(4), 0);
@@ -401,10 +421,17 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
                        U8500_CLKRST1_BASE, BIT(2), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "nmk-i2c.1");
+
        clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
                        U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "msp0");
+       clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0");
+
        clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
                        U8500_CLKRST1_BASE, BIT(4), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "msp1");
+       clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1");
 
        clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
                        U8500_CLKRST1_BASE, BIT(5), CLK_SET_RATE_GATE);
@@ -412,17 +439,25 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
                        U8500_CLKRST1_BASE, BIT(6), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "nmk-i2c.2");
+
        clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
-                       U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
-       /* FIXME: Redefinition of BIT(3). */
+                       U8500_CLKRST1_BASE, BIT(8), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "slimbus0");
+
        clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
                        U8500_CLKRST1_BASE, BIT(9), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "nmk-i2c.4");
+
        clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
                        U8500_CLKRST1_BASE, BIT(10), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "msp3");
+       clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3");
 
        /* Periph2 */
        clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
                        U8500_CLKRST2_BASE, BIT(0), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "nmk-i2c.3");
 
        clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
                        U8500_CLKRST2_BASE, BIT(2), CLK_SET_RATE_GATE);
@@ -430,6 +465,8 @@ void u8500_clk_init(void)
 
        clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
                        U8500_CLKRST2_BASE, BIT(3), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "msp2");
+       clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2");
 
        clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
                        U8500_CLKRST2_BASE, BIT(4), CLK_SET_RATE_GATE);
@@ -450,10 +487,15 @@ void u8500_clk_init(void)
        /* Periph3 */
        clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
                        U8500_CLKRST3_BASE, BIT(1), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "ssp0");
+
        clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
                        U8500_CLKRST3_BASE, BIT(2), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "ssp1");
+
        clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
                        U8500_CLKRST3_BASE, BIT(3), CLK_SET_RATE_GATE);
+       clk_register_clkdev(clk, NULL, "nmk-i2c.0");
 
        clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
                        U8500_CLKRST3_BASE, BIT(4), CLK_SET_RATE_GATE);
index 8d48047..8c41396 100644 (file)
@@ -33,7 +33,7 @@
  *             detection. The mods to Rev F required more family
  *             information detection.
  *
- *     Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
+ *     Changes/Fixes by Borislav Petkov <bp@alien8.de>:
  *             - misc fixes and code cleanups
  *
  * This module is based on the following documents
index 6c86f6e..351945f 100644 (file)
@@ -5,7 +5,7 @@
  *
  * 2007 (c) MontaVista Software, Inc.
  * 2010 (c) Advanced Micro Devices Inc.
- *         Borislav Petkov <borislav.petkov@amd.com>
+ *         Borislav Petkov <bp@alien8.de>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 66b5151..2ae78f2 100644 (file)
@@ -6,7 +6,7 @@
  * This file may be distributed under the terms of the GNU General Public
  * License version 2.
  *
- * Copyright (c) 2010:  Borislav Petkov <borislav.petkov@amd.com>
+ * Copyright (c) 2010:  Borislav Petkov <bp@alien8.de>
  *                     Advanced Micro Devices Inc.
  */
 
@@ -168,6 +168,6 @@ module_init(edac_init_mce_inject);
 module_exit(edac_exit_mce_inject);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Borislav Petkov <borislav.petkov@amd.com>");
+MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>");
 MODULE_AUTHOR("AMD Inc.");
 MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding");
index 1162d6b..bb1b392 100644 (file)
@@ -1546,6 +1546,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
        struct sbp2_logical_unit *lu = sdev->hostdata;
 
        sdev->use_10_for_rw = 1;
+       sdev->no_report_opcodes = 1;
+       sdev->no_write_same = 1;
 
        if (sbp2_param_exclusive_login)
                sdev->manage_start_stop = 1;
index d055cee..47150f5 100644 (file)
@@ -47,7 +47,7 @@ if GPIOLIB
 
 config OF_GPIO
        def_bool y
-       depends on OF && !SPARC
+       depends on OF
 
 config DEBUG_GPIO
        bool "Debug GPIO calls"
@@ -466,7 +466,7 @@ config GPIO_ADP5588_IRQ
 
 config GPIO_ADNP
        tristate "Avionic Design N-bit GPIO expander"
-       depends on I2C && OF
+       depends on I2C && OF_GPIO
        help
          This option enables support for N GPIOs found on Avionic Design
          I2C GPIO expanders. The register space will be extended by powers
index 0f42518..ce1c847 100644 (file)
@@ -77,7 +77,7 @@ struct mcp23s08_driver_data {
 
 /*----------------------------------------------------------------------*/
 
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
 
 static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg)
 {
@@ -399,7 +399,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
                break;
 #endif /* CONFIG_SPI_MASTER */
 
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
        case MCP_TYPE_008:
                mcp->ops = &mcp23008_ops;
                mcp->chip.ngpio = 8;
@@ -473,7 +473,7 @@ fail:
 
 /*----------------------------------------------------------------------*/
 
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
 
 static int __devinit mcp230xx_probe(struct i2c_client *client,
                                    const struct i2c_device_id *id)
index cf7afb9..be65c04 100644 (file)
@@ -92,6 +92,11 @@ static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip)
        return mvchip->membase + GPIO_OUT_OFF;
 }
 
+static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip)
+{
+       return mvchip->membase + GPIO_BLINK_EN_OFF;
+}
+
 static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
 {
        return mvchip->membase + GPIO_IO_CONF_OFF;
@@ -206,6 +211,23 @@ static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin)
        return (u >> pin) & 1;
 }
 
+static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value)
+{
+       struct mvebu_gpio_chip *mvchip =
+               container_of(chip, struct mvebu_gpio_chip, chip);
+       unsigned long flags;
+       u32 u;
+
+       spin_lock_irqsave(&mvchip->lock, flags);
+       u = readl_relaxed(mvebu_gpioreg_blink(mvchip));
+       if (value)
+               u |= 1 << pin;
+       else
+               u &= ~(1 << pin);
+       writel_relaxed(u, mvebu_gpioreg_blink(mvchip));
+       spin_unlock_irqrestore(&mvchip->lock, flags);
+}
+
 static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
 {
        struct mvebu_gpio_chip *mvchip =
@@ -244,6 +266,7 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin,
        if (ret)
                return ret;
 
+       mvebu_gpio_blink(chip, pin, 0);
        mvebu_gpio_set(chip, pin, value);
 
        spin_lock_irqsave(&mvchip->lock, flags);
index ac91a33..6f58c81 100644 (file)
@@ -8,7 +8,7 @@ drm-y       :=  drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
                drm_context.o drm_dma.o \
                drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
                drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
-               drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+               drm_agpsupport.o drm_scatter.o drm_pci.o \
                drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
                drm_crtc.o drm_modes.o drm_edid.o \
                drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -16,6 +16,7 @@ drm-y       :=        drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_PCI) += ati_pcigart.o
 
 drm-usb-y   := drm_usb.o
 
index 0a54f65..3602731 100644 (file)
@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
 
 static int ast_bo_move(struct ttm_buffer_object *bo,
                       bool evict, bool interruptible,
-                      bool no_wait_reserve, bool no_wait_gpu,
+                      bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
        int r;
-       r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+       r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
        return r;
 }
 
@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
        ast_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;
 
@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
 
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;
 
@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
 
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret) {
                DRM_ERROR("pushing to VRAM failed\n");
                return ret;
index 90d7701..1413a26 100644 (file)
@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 
 static int cirrus_bo_move(struct ttm_buffer_object *bo,
                       bool evict, bool interruptible,
-                      bool no_wait_reserve, bool no_wait_gpu,
+                      bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
        int r;
-       r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+       r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
        return r;
 }
 
@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
        cirrus_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;
 
@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
 
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;
 
@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
 
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret) {
                DRM_ERROR("pushing to VRAM failed\n");
                return ret;
index d6d0072..f2d667b 100644 (file)
@@ -559,11 +559,11 @@ int drm_connector_init(struct drm_device *dev,
        dev->mode_config.num_connector++;
 
        if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              dev->mode_config.edid_property,
                                              0);
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.dpms_property, 0);
 
  out:
@@ -2928,27 +2928,6 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
 }
 EXPORT_SYMBOL(drm_property_destroy);
 
-void drm_connector_attach_property(struct drm_connector *connector,
-                              struct drm_property *property, uint64_t init_val)
-{
-       drm_object_attach_property(&connector->base, property, init_val);
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
-                                 struct drm_property *property, uint64_t value)
-{
-       return drm_object_property_set_value(&connector->base, property, value);
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
-                                 struct drm_property *property, uint64_t *val)
-{
-       return drm_object_property_get_value(&connector->base, property, val);
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
-
 void drm_object_attach_property(struct drm_mode_object *obj,
                                struct drm_property *property,
                                uint64_t init_val)
@@ -3185,7 +3164,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
        /* Delete edid, when there is none. */
        if (!edid) {
                connector->edid_blob_ptr = NULL;
-               ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+               ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
                return ret;
        }
 
@@ -3195,7 +3174,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
        if (!connector->edid_blob_ptr)
                return -EINVAL;
 
-       ret = drm_connector_property_set_value(connector,
+       ret = drm_object_property_set_value(&connector->base,
                                               dev->mode_config.edid_property,
                                               connector->edid_blob_ptr->base.id);
 
@@ -3262,7 +3241,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
 
        /* store the property value if successful */
        if (!ret)
-               drm_connector_property_set_value(connector, property, value);
+               drm_object_property_set_value(&connector->base, property, value);
        return ret;
 }
 
index 1fe719f..7b2d378 100644 (file)
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_edid.h>
 
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ *                                             connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
 void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
 {
        struct drm_connector *connector, *tmp;
@@ -82,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 
 /**
  * drm_helper_probe_single_connector_modes - get complete set of display modes
- * @dev: DRM device
+ * @connector: connector to probe
  * @maxX: max width for modes
  * @maxY: max height for modes
  *
  * LOCKING:
  * Caller must hold mode config lock.
  *
- * Based on @dev's mode_config layout, scan all the connectors and try to detect
- * modes on them.  Modes will first be added to the connector's probed_modes
- * list, then culled (based on validity and the @maxX, @maxY parameters) and
- * put into the normal modes list.
- *
- * Intended to be used either at bootup time or when major configuration
- * changes have occurred.
+ * Based on the helper callbacks implemented by @connector try to detect all
+ * valid modes.  Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
  *
- * FIXME: take into account monitor limits
+ * Intended to be used as a generic implementation of the ->probe() @connector
+ * callback for drivers that use the crtc helpers for output mode filtering and
+ * detection.
  *
  * RETURNS:
  * Number of modes found on @connector.
@@ -348,17 +358,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
 }
 
 /**
- * drm_crtc_set_mode - set a mode
+ * drm_crtc_helper_set_mode - internal helper to set a mode
  * @crtc: CRTC to program
  * @mode: mode to use
  * @x: horizontal offset into the surface
  * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
  *
  * LOCKING:
  * Caller must hold mode config lock.
  *
  * Try to set @mode on @crtc.  Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it.
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration. For
+ * example for changing whether audio is enabled on a hdmi link or for changing
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
  *
  * RETURNS:
  * True if the mode was set successfully, or false otherwise.
@@ -514,20 +531,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 
 /**
  * drm_crtc_helper_set_config - set a new config from userspace
- * @crtc: CRTC to setup
- * @crtc_info: user provided configuration
- * @new_mode: new mode to set
- * @connector_set: set of connectors for the new config
- * @fb: new framebuffer
+ * @set: mode set configuration
  *
  * LOCKING:
  * Caller must hold mode config lock.
  *
- * Setup a new configuration, provided by the user in @crtc_info, and enable
- * it.
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
  *
  * RETURNS:
- * Zero. (FIXME)
+ * Returns 0 on success, -ERRNO on failure.
  */
 int drm_crtc_helper_set_config(struct drm_mode_set *set)
 {
@@ -823,12 +839,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
 }
 
 /**
- * drm_helper_connector_dpms
- * @connector affected connector
- * @mode DPMS mode
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
  *
- * Calls the low-level connector DPMS function, then
- * calls appropriate encoder and crtc DPMS functions as well
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
  */
 void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 {
index 3c4cccd..89e1966 100644 (file)
 #include <drm/drm_dp_helper.h>
 #include <drm/drmP.h>
 
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
+
 /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
 static int
 i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -193,6 +202,18 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
        return 0;
 }
 
+/**
+ * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+ * @adapter: i2c adapter to register
+ *
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
+ * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+ * and store it in the algo_data member of the @adapter argument. This will be
+ * used by the i2c over dp aux algorithm to drive the hardware.
+ *
+ * RETURNS:
+ * 0 on success, -ERRNO on failure.
+ */
 int
 i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 {
index 011bd4f..5a3770f 100644 (file)
@@ -1639,7 +1639,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
        if (len >= 12)
                connector->audio_latency[1] = db[12];
 
-       DRM_LOG_KMS("HDMI: DVI dual %d, "
+       DRM_DEBUG_KMS("HDMI: DVI dual %d, "
                    "max TMDS clock %d, "
                    "latency present %d %d, "
                    "video latency %d %d, "
index 2c44af6..954d175 100644 (file)
@@ -45,6 +45,15 @@ MODULE_LICENSE("GPL and additional rights");
 
 static LIST_HEAD(kernel_fb_helper_list);
 
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently from the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ */
+
 /* simple single crtc case helper function */
 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 {
@@ -339,7 +348,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
                for (j = 0; j < fb_helper->connector_count; j++) {
                        connector = fb_helper->connector_info[j]->connector;
                        connector->funcs->dpms(connector, dpms_mode);
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                dev->mode_config.dpms_property, dpms_mode);
                }
        }
@@ -1302,12 +1311,14 @@ out:
 
 /**
  * drm_helper_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
  *
  * LOCKING:
- * Called at init time, must take mode config lock.
+ * Called at init time by the driver to set up the @fb_helper initial
+ * configuration, must take the mode config lock.
  *
- * Scan the CRTCs and connectors and try to put together an initial setup.
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
  * At the moment, this is a cloned configuration across all heads with
  * a new framebuffer object as the backing store.
  *
@@ -1341,7 +1352,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
 
 /**
  * drm_fb_helper_hotplug_event - respond to a hotplug notification by
- *                               probing all the outputs attached to the fb.
+ *                               probing all the outputs attached to the fb
  * @fb_helper: the drm_fb_helper
  *
  * LOCKING:
index 7ef1b67..133b413 100644 (file)
@@ -121,6 +121,8 @@ int drm_open(struct inode *inode, struct file *filp)
        int minor_id = iminor(inode);
        struct drm_minor *minor;
        int retcode = 0;
+       int need_setup = 0;
+       struct address_space *old_mapping;
 
        minor = idr_find(&drm_minors_idr, minor_id);
        if (!minor)
@@ -132,23 +134,37 @@ int drm_open(struct inode *inode, struct file *filp)
        if (drm_device_is_unplugged(dev))
                return -ENODEV;
 
+       if (!dev->open_count++)
+               need_setup = 1;
+       mutex_lock(&dev->struct_mutex);
+       old_mapping = dev->dev_mapping;
+       if (old_mapping == NULL)
+               dev->dev_mapping = &inode->i_data;
+       /* ihold ensures nobody can remove inode with our i_data */
+       ihold(container_of(dev->dev_mapping, struct inode, i_data));
+       inode->i_mapping = dev->dev_mapping;
+       filp->f_mapping = dev->dev_mapping;
+       mutex_unlock(&dev->struct_mutex);
+
        retcode = drm_open_helper(inode, filp, dev);
-       if (!retcode) {
-               atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
-               if (!dev->open_count++)
-                       retcode = drm_setup(dev);
-       }
-       if (!retcode) {
-               mutex_lock(&dev->struct_mutex);
-               if (dev->dev_mapping == NULL)
-                       dev->dev_mapping = &inode->i_data;
-               /* ihold ensures nobody can remove inode with our i_data */
-               ihold(container_of(dev->dev_mapping, struct inode, i_data));
-               inode->i_mapping = dev->dev_mapping;
-               filp->f_mapping = dev->dev_mapping;
-               mutex_unlock(&dev->struct_mutex);
+       if (retcode)
+               goto err_undo;
+       atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+       if (need_setup) {
+               retcode = drm_setup(dev);
+               if (retcode)
+                       goto err_undo;
        }
+       return 0;
 
+err_undo:
+       mutex_lock(&dev->struct_mutex);
+       filp->f_mapping = old_mapping;
+       inode->i_mapping = old_mapping;
+       iput(container_of(dev->dev_mapping, struct inode, i_data));
+       dev->dev_mapping = old_mapping;
+       mutex_unlock(&dev->struct_mutex);
+       dev->open_count--;
        return retcode;
 }
 EXPORT_SYMBOL(drm_open);
index 5729e39..8025454 100644 (file)
@@ -67,7 +67,7 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
        hashed_key = hash_long(key, ht->order);
        DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
        h_list = &ht->table[hashed_key];
-       hlist_for_each_entry_rcu(entry, list, h_list, head)
+       hlist_for_each_entry(entry, list, h_list, head)
                DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
 }
 
@@ -81,7 +81,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
 
        hashed_key = hash_long(key, ht->order);
        h_list = &ht->table[hashed_key];
-       hlist_for_each_entry_rcu(entry, list, h_list, head) {
+       hlist_for_each_entry(entry, list, h_list, head) {
                if (entry->key == key)
                        return list;
                if (entry->key > key)
@@ -90,6 +90,24 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
        return NULL;
 }
 
+static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
+                                             unsigned long key)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list;
+       unsigned int hashed_key;
+
+       hashed_key = hash_long(key, ht->order);
+       h_list = &ht->table[hashed_key];
+       hlist_for_each_entry_rcu(entry, list, h_list, head) {
+               if (entry->key == key)
+                       return list;
+               if (entry->key > key)
+                       break;
+       }
+       return NULL;
+}
 
 int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
@@ -102,7 +120,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
        hashed_key = hash_long(key, ht->order);
        h_list = &ht->table[hashed_key];
        parent = NULL;
-       hlist_for_each_entry_rcu(entry, list, h_list, head) {
+       hlist_for_each_entry(entry, list, h_list, head) {
                if (entry->key == key)
                        return -EINVAL;
                if (entry->key > key)
@@ -152,7 +170,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
 {
        struct hlist_node *list;
 
-       list = drm_ht_find_key(ht, key);
+       list = drm_ht_find_key_rcu(ht, key);
        if (!list)
                return -EINVAL;
 
index 2ba9d7f..19c01ca 100644 (file)
@@ -1021,6 +1021,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 
        /* Send any queued vblank events, lest the natives grow disquiet */
        seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+       spin_lock(&dev->event_lock);
        list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
                if (e->pipe != crtc)
                        continue;
@@ -1031,6 +1033,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
                drm_vblank_put(dev, e->pipe);
                send_vblank_event(dev, e, seq, &now);
        }
+       spin_unlock(&dev->event_lock);
 
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
index ba33144..754bc96 100644 (file)
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
 {
        struct pci_dev *root;
        int pos;
-       u32 lnkcap, lnkcap2;
+       u32 lnkcap = 0, lnkcap2 = 0;
 
        *mask = 0;
        if (!dev->pdev)
index 05cd8fe..0229665 100644 (file)
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
        uint64_t dpms_status;
        int ret;
 
-       ret = drm_connector_property_get_value(connector,
+       ret = drm_object_property_get_value(&connector->base,
                                            dev->mode_config.dpms_property,
                                            &dpms_status);
        if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
                return 0;
        }
 
-       ret = drm_connector_property_get_value(connector, prop, &subconnector);
+       ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
        if (ret)
                return 0;
 
@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
                return 0;
        }
 
-       ret = drm_connector_property_get_value(connector, prop, &subconnector);
+       ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
        if (ret)
                return 0;
 
index fc345d4..1d1f1e5 100644 (file)
@@ -10,6 +10,12 @@ config DRM_EXYNOS
          Choose this option if you have a Samsung SoC EXYNOS chipset.
          If M is selected the module will be called exynosdrm.
 
+config DRM_EXYNOS_IOMMU
+       bool "EXYNOS DRM IOMMU Support"
+       depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+       help
+         Choose this option if you want to use IOMMU feature for DRM.
+
 config DRM_EXYNOS_DMABUF
        bool "EXYNOS DRM DMABUF"
        depends on DRM_EXYNOS
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
        depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
        help
          Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+       bool "Exynos DRM IPP"
+       depends on DRM_EXYNOS
+       help
+         Choose this option if you want to use IPP feature for DRM.
+
+config DRM_EXYNOS_FIMC
+       bool "Exynos DRM FIMC"
+       depends on DRM_EXYNOS_IPP
+       help
+         Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+       bool "Exynos DRM Rotator"
+       depends on DRM_EXYNOS_IPP
+       help
+         Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+       bool "Exynos DRM GSC"
+       depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+       help
+         Choose this option if you want to use Exynos GSC for DRM.
index eb651ca..639b49e 100644 (file)
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
                exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
                exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)    += exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)    += exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)   += exynos_hdmi.o exynos_mixer.o \
                                           exynos_drm_hdmi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI)    += exynos_drm_vidi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_G2D)     += exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP)     += exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC)    += exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC)     += exynos_drm_gsc.o
 
 obj-$(CONFIG_DRM_EXYNOS)               += exynosdrm.o
index 37e6ec7..bef43e0 100644 (file)
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
        { },
 };
 
+#ifdef CONFIG_OF
 static struct of_device_id hdmiddc_match_types[] = {
        {
                .compatible = "samsung,exynos5-hdmiddc",
@@ -55,12 +56,13 @@ static struct of_device_id hdmiddc_match_types[] = {
                /* end node */
        }
 };
+#endif
 
 struct i2c_driver ddc_driver = {
        .driver = {
                .name = "exynos-hdmiddc",
                .owner = THIS_MODULE,
-               .of_match_table = hdmiddc_match_types,
+               .of_match_table = of_match_ptr(hdmiddc_match_types),
        },
        .id_table       = ddc_idtable,
        .probe          = s5p_ddc_probe,
index 118c117..9601bad 100644 (file)
 static int lowlevel_buffer_allocate(struct drm_device *dev,
                unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
-       dma_addr_t start_addr;
-       unsigned int npages, i = 0;
-       struct scatterlist *sgl;
        int ret = 0;
+       enum dma_attr attr;
+       unsigned int nr_pages;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       if (IS_NONCONTIG_BUFFER(flags)) {
-               DRM_DEBUG_KMS("not support allocation type.\n");
-               return -EINVAL;
-       }
-
        if (buf->dma_addr) {
                DRM_DEBUG_KMS("already allocated.\n");
                return 0;
        }
 
-       if (buf->size >= SZ_1M) {
-               npages = buf->size >> SECTION_SHIFT;
-               buf->page_size = SECTION_SIZE;
-       } else if (buf->size >= SZ_64K) {
-               npages = buf->size >> 16;
-               buf->page_size = SZ_64K;
-       } else {
-               npages = buf->size >> PAGE_SHIFT;
-               buf->page_size = PAGE_SIZE;
-       }
+       init_dma_attrs(&buf->dma_attrs);
 
-       buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!buf->sgt) {
-               DRM_ERROR("failed to allocate sg table.\n");
-               return -ENOMEM;
-       }
+       /*
+        * if EXYNOS_BO_CONTIG, fully physically contiguous memory
+        * region will be allocated else physically contiguous
+        * as possible.
+        */
+       if (flags & EXYNOS_BO_CONTIG)
+               dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
 
-       ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
-       if (ret < 0) {
-               DRM_ERROR("failed to initialize sg table.\n");
-               kfree(buf->sgt);
-               buf->sgt = NULL;
-               return -ENOMEM;
-       }
+       /*
+        * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
+        * else cachable mapping.
+        */
+       if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
+               attr = DMA_ATTR_WRITE_COMBINE;
+       else
+               attr = DMA_ATTR_NON_CONSISTENT;
 
-       buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
-                       &buf->dma_addr, GFP_KERNEL);
-       if (!buf->kvaddr) {
-               DRM_ERROR("failed to allocate buffer.\n");
-               ret = -ENOMEM;
-               goto err1;
-       }
+       dma_set_attr(attr, &buf->dma_attrs);
+       dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
 
-       buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
+       buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+                       &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
        if (!buf->pages) {
-               DRM_ERROR("failed to allocate pages.\n");
-               ret = -ENOMEM;
-               goto err2;
+               DRM_ERROR("failed to allocate buffer.\n");
+               return -ENOMEM;
        }
 
-       sgl = buf->sgt->sgl;
-       start_addr = buf->dma_addr;
-
-       while (i < npages) {
-               buf->pages[i] = phys_to_page(start_addr);
-               sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
-               sg_dma_address(sgl) = start_addr;
-               start_addr += buf->page_size;
-               sgl = sg_next(sgl);
-               i++;
+       nr_pages = buf->size >> PAGE_SHIFT;
+       buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
+       if (!buf->sgt) {
+               DRM_ERROR("failed to get sg table.\n");
+               ret = -ENOMEM;
+               goto err_free_attrs;
        }
 
-       DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
-                       (unsigned long)buf->kvaddr,
+       DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)buf->dma_addr,
                        buf->size);
 
        return ret;
-err2:
-       dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-                       (dma_addr_t)buf->dma_addr);
+
+err_free_attrs:
+       dma_free_attrs(dev->dev, buf->size, buf->pages,
+                       (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
        buf->dma_addr = (dma_addr_t)NULL;
-err1:
-       sg_free_table(buf->sgt);
-       kfree(buf->sgt);
-       buf->sgt = NULL;
 
        return ret;
 }
@@ -125,23 +100,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 {
        DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-       /*
-        * release only physically continuous memory and
-        * non-continuous memory would be released by exynos
-        * gem framework.
-        */
-       if (IS_NONCONTIG_BUFFER(flags)) {
-               DRM_DEBUG_KMS("not support allocation type.\n");
-               return;
-       }
-
        if (!buf->dma_addr) {
                DRM_DEBUG_KMS("dma_addr is invalid.\n");
                return;
        }
 
-       DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
-                       (unsigned long)buf->kvaddr,
+       DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)buf->dma_addr,
                        buf->size);
 
@@ -150,11 +114,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
        kfree(buf->sgt);
        buf->sgt = NULL;
 
-       kfree(buf->pages);
-       buf->pages = NULL;
-
-       dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-                               (dma_addr_t)buf->dma_addr);
+       dma_free_attrs(dev->dev, buf->size, buf->pages,
+                               (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
        buf->dma_addr = (dma_addr_t)NULL;
 }
 
index 3388e4e..25cf162 100644 (file)
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
 void exynos_drm_fini_buf(struct drm_device *dev,
                                struct exynos_drm_gem_buf *buffer);
 
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
 int exynos_drm_alloc_buf(struct drm_device *dev,
                                struct exynos_drm_gem_buf *buf,
                                unsigned int flags);
 
-/* release physical memory region, sgt and pages. */
+/* release physical memory region, and sgt. */
 void exynos_drm_free_buf(struct drm_device *dev,
                                unsigned int flags,
                                struct exynos_drm_gem_buf *buffer);
index fce245f..2efa4b0 100644 (file)
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
                        goto out;
                }
 
+               spin_lock_irq(&dev->event_lock);
                list_add_tail(&event->base.link,
                                &dev_priv->pageflip_event_list);
+               spin_unlock_irq(&dev->event_lock);
 
                crtc->fb = fb;
                ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
                                                    NULL);
                if (ret) {
                        crtc->fb = old_fb;
+
+                       spin_lock_irq(&dev->event_lock);
                        drm_vblank_put(dev, exynos_crtc->pipe);
                        list_del(&event->base.link);
+                       spin_unlock_irq(&dev->event_lock);
 
                        goto out;
                }
index fae1f2e..61d5a84 100644 (file)
 
 #include <linux/dma-buf.h>
 
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
-               unsigned int page_size)
+struct exynos_drm_dmabuf_attachment {
+       struct sg_table sgt;
+       enum dma_data_direction dir;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+                                       struct device *dev,
+                                       struct dma_buf_attachment *attach)
 {
-       struct sg_table *sgt = NULL;
-       struct scatterlist *sgl;
-       int i, ret;
+       struct exynos_drm_dmabuf_attachment *exynos_attach;
 
-       sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-       if (!sgt)
-               goto out;
+       exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+       if (!exynos_attach)
+               return -ENOMEM;
 
-       ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
-       if (ret)
-               goto err_free_sgt;
+       exynos_attach->dir = DMA_NONE;
+       attach->priv = exynos_attach;
 
-       if (page_size < PAGE_SIZE)
-               page_size = PAGE_SIZE;
+       return 0;
+}
 
-       for_each_sg(sgt->sgl, sgl, nr_pages, i)
-               sg_set_page(sgl, pages[i], page_size, 0);
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+                                       struct dma_buf_attachment *attach)
+{
+       struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+       struct sg_table *sgt;
 
-       return sgt;
+       if (!exynos_attach)
+               return;
 
-err_free_sgt:
-       kfree(sgt);
-       sgt = NULL;
-out:
-       return NULL;
+       sgt = &exynos_attach->sgt;
+
+       if (exynos_attach->dir != DMA_NONE)
+               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+                               exynos_attach->dir);
+
+       sg_free_table(sgt);
+       kfree(exynos_attach);
+       attach->priv = NULL;
 }
 
 static struct sg_table *
                exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                        enum dma_data_direction dir)
 {
+       struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
        struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
        struct drm_device *dev = gem_obj->base.dev;
        struct exynos_drm_gem_buf *buf;
+       struct scatterlist *rd, *wr;
        struct sg_table *sgt = NULL;
-       unsigned int npages;
-       int nents;
+       unsigned int i;
+       int nents, ret;
 
        DRM_DEBUG_PRIME("%s\n", __FILE__);
 
-       mutex_lock(&dev->struct_mutex);
+       if (WARN_ON(dir == DMA_NONE))
+               return ERR_PTR(-EINVAL);
+
+       /* just return current sgt if already requested. */
+       if (exynos_attach->dir == dir)
+               return &exynos_attach->sgt;
+
+       /* reattaching is not allowed. */
+       if (WARN_ON(exynos_attach->dir != DMA_NONE))
+               return ERR_PTR(-EBUSY);
 
        buf = gem_obj->buffer;
+       if (!buf) {
+               DRM_ERROR("buffer is null.\n");
+               return ERR_PTR(-ENOMEM);
+       }
 
-       /* there should always be pages allocated. */
-       if (!buf->pages) {
-               DRM_ERROR("pages is null.\n");
-               goto err_unlock;
+       sgt = &exynos_attach->sgt;
+
+       ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+       if (ret) {
+               DRM_ERROR("failed to alloc sgt.\n");
+               return ERR_PTR(-ENOMEM);
        }
 
-       npages = buf->size / buf->page_size;
+       mutex_lock(&dev->struct_mutex);
 
-       sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
-       if (!sgt) {
-               DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+       rd = buf->sgt->sgl;
+       wr = sgt->sgl;
+       for (i = 0; i < sgt->orig_nents; ++i) {
+               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+               rd = sg_next(rd);
+               wr = sg_next(wr);
+       }
+
+       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+       if (!nents) {
+               DRM_ERROR("failed to map sgl with iommu.\n");
+               sgt = ERR_PTR(-EIO);
                goto err_unlock;
        }
-       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
 
-       DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
-                       npages, buf->size, buf->page_size);
+       exynos_attach->dir = dir;
+       attach->priv = exynos_attach;
+
+       DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
 
 err_unlock:
        mutex_unlock(&dev->struct_mutex);
@@ -104,10 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                                                struct sg_table *sgt,
                                                enum dma_data_direction dir)
 {
-       dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-       sg_free_table(sgt);
-       kfree(sgt);
-       sgt = NULL;
+       /* Nothing to do. */
 }
 
 static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 }
 
 static struct dma_buf_ops exynos_dmabuf_ops = {
+       .attach                 = exynos_gem_attach_dma_buf,
+       .detach                 = exynos_gem_detach_dma_buf,
        .map_dma_buf            = exynos_gem_map_dma_buf,
        .unmap_dma_buf          = exynos_gem_unmap_dma_buf,
        .kmap                   = exynos_gem_dmabuf_kmap,
@@ -196,7 +233,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
        struct scatterlist *sgl;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buffer;
-       struct page *page;
        int ret;
 
        DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +269,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
                goto err_unmap_attach;
        }
 
-       buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
-       if (!buffer->pages) {
-               DRM_ERROR("failed to allocate pages.\n");
-               ret = -ENOMEM;
-               goto err_free_buffer;
-       }
-
        exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
-               goto err_free_pages;
+               goto err_free_buffer;
        }
 
        sgl = sgt->sgl;
 
-       if (sgt->nents == 1) {
-               buffer->dma_addr = sg_dma_address(sgt->sgl);
-               buffer->size = sg_dma_len(sgt->sgl);
+       buffer->size = dma_buf->size;
+       buffer->dma_addr = sg_dma_address(sgl);
 
+       if (sgt->nents == 1) {
                /* always physically continuous memory if sgt->nents is 1. */
                exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
        } else {
-               unsigned int i = 0;
-
-               buffer->dma_addr = sg_dma_address(sgl);
-               while (i < sgt->nents) {
-                       buffer->pages[i] = sg_page(sgl);
-                       buffer->size += sg_dma_len(sgl);
-                       sgl = sg_next(sgl);
-                       i++;
-               }
-
+               /*
+                * this case could be CONTIG or NONCONTIG type but for now
+                * sets NONCONTIG.
+                * TODO. we have to find a way that exporter can notify
+                * the type of its own buffer to importer.
+                */
                exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
        }
 
@@ -277,9 +302,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 
        return &exynos_gem_obj->base;
 
-err_free_pages:
-       kfree(buffer->pages);
-       buffer->pages = NULL;
 err_free_buffer:
        kfree(buffer);
        buffer = NULL;
index 1de7baa..e0a8e80 100644 (file)
@@ -40,6 +40,8 @@
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME    "exynos"
 #define DRIVER_DESC    "Samsung SoC DRM"
@@ -49,6 +51,9 @@
 
 #define VBLANK_OFF_DELAY       50000
 
+/* platform device pointer for exynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
        struct exynos_drm_private *private;
@@ -66,6 +71,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
        INIT_LIST_HEAD(&private->pageflip_event_list);
        dev->dev_private = (void *)private;
 
+       /*
+        * create mapping to manage iommu table and set a pointer to iommu
+        * mapping structure to iommu_mapping of private data.
+        * also this iommu_mapping can be used to check if iommu is supported
+        * or not.
+        */
+       ret = drm_create_iommu_mapping(dev);
+       if (ret < 0) {
+               DRM_ERROR("failed to create iommu mapping.\n");
+               goto err_crtc;
+       }
+
        drm_mode_config_init(dev);
 
        /* init kms poll for handling hpd */
@@ -80,7 +97,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
        for (nr = 0; nr < MAX_CRTC; nr++) {
                ret = exynos_drm_crtc_create(dev, nr);
                if (ret)
-                       goto err_crtc;
+                       goto err_release_iommu_mapping;
        }
 
        for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +106,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 
                plane = exynos_plane_init(dev, possible_crtcs, false);
                if (!plane)
-                       goto err_crtc;
+                       goto err_release_iommu_mapping;
        }
 
        ret = drm_vblank_init(dev, MAX_CRTC);
        if (ret)
-               goto err_crtc;
+               goto err_release_iommu_mapping;
 
        /*
         * probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +143,8 @@ err_drm_device:
        exynos_drm_device_unregister(dev);
 err_vblank:
        drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+       drm_release_iommu_mapping(dev);
 err_crtc:
        drm_mode_config_cleanup(dev);
        kfree(private);
@@ -142,6 +161,8 @@ static int exynos_drm_unload(struct drm_device *dev)
        drm_vblank_cleanup(dev);
        drm_kms_helper_poll_fini(dev);
        drm_mode_config_cleanup(dev);
+
+       drm_release_iommu_mapping(dev);
        kfree(dev->dev_private);
 
        dev->dev_private = NULL;
@@ -229,6 +250,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
                        exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
        DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
                        exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+                       exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+                       exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+                       exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+                       exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
 };
 
 static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +308,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
        DRM_DEBUG_DRIVER("%s\n", __FILE__);
 
+       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
 
        return drm_platform_init(&exynos_drm_driver, pdev);
@@ -324,6 +354,10 @@ static int __init exynos_drm_init(void)
        ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
        if (ret < 0)
                goto out_common_hdmi;
+
+       ret = exynos_platform_device_hdmi_register();
+       if (ret < 0)
+               goto out_common_hdmi_dev;
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +372,80 @@ static int __init exynos_drm_init(void)
                goto out_g2d;
 #endif
 
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+       ret = platform_driver_register(&fimc_driver);
+       if (ret < 0)
+               goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+       ret = platform_driver_register(&rotator_driver);
+       if (ret < 0)
+               goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+       ret = platform_driver_register(&gsc_driver);
+       if (ret < 0)
+               goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+       ret = platform_driver_register(&ipp_driver);
+       if (ret < 0)
+               goto out_ipp;
+#endif
+
        ret = platform_driver_register(&exynos_drm_platform_driver);
        if (ret < 0)
+               goto out_drm;
+
+       exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+                               NULL, 0);
+       if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+               ret = PTR_ERR(exynos_drm_pdev);
                goto out;
+       }
 
        return 0;
 
 out:
+       platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+       platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+       platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+       platform_driver_unregister(&rotator_driver);
+out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+       platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
        platform_driver_unregister(&g2d_driver);
 out_g2d:
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_VIDI
-out_vidi:
        platform_driver_unregister(&vidi_driver);
+out_vidi:
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_HDMI
+       exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
        platform_driver_unregister(&exynos_drm_common_hdmi_driver);
 out_common_hdmi:
        platform_driver_unregister(&mixer_driver);
@@ -375,13 +465,32 @@ static void __exit exynos_drm_exit(void)
 {
        DRM_DEBUG_DRIVER("%s\n", __FILE__);
 
+       platform_device_unregister(exynos_drm_pdev);
+
        platform_driver_unregister(&exynos_drm_platform_driver);
 
+#ifdef CONFIG_DRM_EXYNOS_IPP
+       platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+       platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+       platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+       platform_driver_unregister(&fimc_driver);
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
        platform_driver_unregister(&g2d_driver);
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_HDMI
+       exynos_platform_device_hdmi_unregister();
        platform_driver_unregister(&exynos_drm_common_hdmi_driver);
        platform_driver_unregister(&mixer_driver);
        platform_driver_unregister(&hdmi_driver);
index a342310..f5a9774 100644 (file)
@@ -74,8 +74,6 @@ enum exynos_drm_output_type {
  * @commit: apply hardware specific overlay data to registers.
  * @enable: enable hardware specific overlay.
  * @disable: disable hardware specific overlay.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- *     hardware overlay is disabled.
  */
 struct exynos_drm_overlay_ops {
        void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +81,6 @@ struct exynos_drm_overlay_ops {
        void (*commit)(struct device *subdrv_dev, int zpos);
        void (*enable)(struct device *subdrv_dev, int zpos);
        void (*disable)(struct device *subdrv_dev, int zpos);
-       void (*wait_for_vblank)(struct device *subdrv_dev);
 };
 
 /*
@@ -110,7 +107,6 @@ struct exynos_drm_overlay_ops {
  * @pixel_format: fourcc pixel format of this overlay
  * @dma_addr: array of bus(accessed by dma) address to the memory region
  *           allocated for a overlay.
- * @vaddr: array of virtual memory addresss to this overlay.
  * @zpos: order of overlay layer(z position).
  * @default_win: a window to be enabled.
  * @color_key: color key on or off.
@@ -142,7 +138,6 @@ struct exynos_drm_overlay {
        unsigned int pitch;
        uint32_t pixel_format;
        dma_addr_t dma_addr[MAX_FB_BUFFER];
-       void __iomem *vaddr[MAX_FB_BUFFER];
        int zpos;
 
        bool default_win;
@@ -186,6 +181,8 @@ struct exynos_drm_display_ops {
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ *     hardware overlay is updated.
  */
 struct exynos_drm_manager_ops {
        void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +197,7 @@ struct exynos_drm_manager_ops {
        void (*commit)(struct device *subdrv_dev);
        int (*enable_vblank)(struct device *subdrv_dev);
        void (*disable_vblank)(struct device *subdrv_dev);
+       void (*wait_for_vblank)(struct device *subdrv_dev);
 };
 
 /*
@@ -231,16 +229,28 @@ struct exynos_drm_g2d_private {
        struct device           *dev;
        struct list_head        inuse_cmdlist;
        struct list_head        event_list;
-       struct list_head        gem_list;
-       unsigned int            gem_nr;
+       struct list_head        userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+       struct device   *dev;
+       struct list_head        event_list;
 };
 
 struct drm_exynos_file_private {
        struct exynos_drm_g2d_private   *g2d_priv;
+       struct exynos_drm_ipp_private   *ipp_priv;
 };
 
 /*
  * Exynos drm private structure.
+ *
+ * @da_start: start address to device address space.
+ *     with iommu, device address space starts from this address
+ *     otherwise default one.
+ * @da_space_size: size of device address space.
+ *     if 0 then default value is used for it.
+ * @da_space_order: order to device address space.
  */
 struct exynos_drm_private {
        struct drm_fb_helper *fb_helper;
@@ -255,6 +265,10 @@ struct exynos_drm_private {
        struct drm_crtc *crtc[MAX_CRTC];
        struct drm_property *plane_zpos_property;
        struct drm_property *crtc_mode_property;
+
+       unsigned long da_start;
+       unsigned long da_space_size;
+       unsigned long da_space_order;
 };
 
 /*
@@ -318,10 +332,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
 
+/*
+ * this function registers exynos drm hdmi platform device. It ensures only one
+ * instance of the device is created.
+ */
+extern int exynos_platform_device_hdmi_register(void);
+
+/*
+ * this function unregisters exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
 extern struct platform_driver fimd_driver;
 extern struct platform_driver hdmi_driver;
 extern struct platform_driver mixer_driver;
 extern struct platform_driver exynos_drm_common_hdmi_driver;
 extern struct platform_driver vidi_driver;
 extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
 #endif
index 241ad1e..3014852 100644 (file)
@@ -226,8 +226,40 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
         * already updated or not by exynos_drm_encoder_dpms function.
         */
        exynos_encoder->updated = true;
+
+       /*
+        * In case of setcrtc, there is no way to update encoder's dpms
+        * so update it here.
+        */
+       exynos_encoder->dpms = DRM_MODE_DPMS_ON;
+}
+
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+       struct exynos_drm_encoder *exynos_encoder;
+       struct exynos_drm_manager_ops *ops;
+       struct drm_device *dev = fb->dev;
+       struct drm_encoder *encoder;
+
+       /*
+        * make sure that overlay data are updated to real hardware
+        * for all encoders.
+        */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               exynos_encoder = to_exynos_encoder(encoder);
+               ops = exynos_encoder->manager->ops;
+
+               /*
+                * wait for vblank interrupt
+                * - this makes sure that overlay data are updated to
+                *      real hardware.
+                */
+               if (ops->wait_for_vblank)
+                       ops->wait_for_vblank(exynos_encoder->manager->dev);
+       }
 }
 
+
 static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
 {
        struct drm_plane *plane;
@@ -499,14 +531,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
 
        if (overlay_ops && overlay_ops->disable)
                overlay_ops->disable(manager->dev, zpos);
-
-       /*
-        * wait for vblank interrupt
-        * - this makes sure that hardware overlay is disabled to avoid
-        * for the dma accesses to memory after gem buffer was released
-        * because the setting for disabling the overlay will be updated
-        * at vsync.
-        */
-       if (overlay_ops->wait_for_vblank)
-               overlay_ops->wait_for_vblank(manager->dev);
 }
index 6470d9d..88bb25a 100644 (file)
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
 
 #endif
index 4ef4cd3..5426cc5 100644 (file)
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
 
 #define to_exynos_fb(x)        container_of(x, struct exynos_drm_fb, fb)
 
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
        struct exynos_drm_gem_obj       *exynos_gem_obj[MAX_FB_BUFFER];
 };
 
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+                               struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+       unsigned int flags;
+
+       /*
+        * if exynos drm driver supports iommu then framebuffer can use
+        * all the buffer types.
+        */
+       if (is_drm_iommu_supported(drm_dev))
+               return 0;
+
+       flags = exynos_gem_obj->flags;
+
+       /*
+        * without iommu support, physically non-contiguous memory is not
+        * supported for framebuffer.
+        */
+       if (IS_NONCONTIG_BUFFER(flags)) {
+               DRM_ERROR("cannot use this gem memory type for fb.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 {
        struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       /* make sure that overlay data are updated before releasing fb. */
+       exynos_drm_encoder_complete_scanout(fb);
+
        drm_framebuffer_cleanup(fb);
 
        for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +160,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
                            struct drm_gem_object *obj)
 {
        struct exynos_drm_fb *exynos_fb;
+       struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;
 
+       exynos_gem_obj = to_exynos_gem_obj(obj);
+
+       ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+       if (ret < 0) {
+               DRM_ERROR("cannot use this gem memory type for fb.\n");
+               return ERR_PTR(-EINVAL);
+       }
+
        exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
        if (!exynos_fb) {
                DRM_ERROR("failed to allocate exynos drm framebuffer\n");
                return ERR_PTR(-ENOMEM);
        }
 
+       drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+       exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
        ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
        if (ret) {
                DRM_ERROR("failed to initialize framebuffer\n");
                return ERR_PTR(ret);
        }
 
-       drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-       exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-
        return &exynos_fb->fb;
 }
 
@@ -190,9 +231,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                      struct drm_mode_fb_cmd2 *mode_cmd)
 {
        struct drm_gem_object *obj;
-       struct drm_framebuffer *fb;
        struct exynos_drm_fb *exynos_fb;
-       int i;
+       int i, ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -202,30 +242,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                return ERR_PTR(-ENOENT);
        }
 
-       fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
-       if (IS_ERR(fb)) {
-               drm_gem_object_unreference_unlocked(obj);
-               return fb;
+       exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+       if (!exynos_fb) {
+               DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+               return ERR_PTR(-ENOMEM);
        }
 
-       exynos_fb = to_exynos_fb(fb);
+       drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+       exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
        exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
 
        DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
 
        for (i = 1; i < exynos_fb->buf_cnt; i++) {
+               struct exynos_drm_gem_obj *exynos_gem_obj;
+               int ret;
+
                obj = drm_gem_object_lookup(dev, file_priv,
                                mode_cmd->handles[i]);
                if (!obj) {
                        DRM_ERROR("failed to lookup gem object\n");
-                       exynos_drm_fb_destroy(fb);
+                       kfree(exynos_fb);
                        return ERR_PTR(-ENOENT);
                }
 
+               exynos_gem_obj = to_exynos_gem_obj(obj);
+
+               ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+               if (ret < 0) {
+                       DRM_ERROR("cannot use this gem memory type for fb.\n");
+                       kfree(exynos_fb);
+                       return ERR_PTR(ret);
+               }
+
                exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
        }
 
-       return fb;
+       ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+       if (ret) {
+               for (i = 0; i < exynos_fb->buf_cnt; i++) {
+                       struct exynos_drm_gem_obj *gem_obj;
+
+                       gem_obj = exynos_fb->exynos_gem_obj[i];
+                       drm_gem_object_unreference_unlocked(&gem_obj->base);
+               }
+
+               kfree(exynos_fb);
+               return ERR_PTR(ret);
+       }
+
+       return &exynos_fb->fb;
 }
 
 struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +309,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
        if (!buffer)
                return NULL;
 
-       DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
-                       (unsigned long)buffer->kvaddr,
-                       (unsigned long)buffer->dma_addr);
+       DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
 
        return buffer;
 }
index 67eb6ba..f433eb7 100644 (file)
@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
        struct exynos_drm_gem_obj       *exynos_gem_obj;
 };
 
+static int exynos_drm_fb_mmap(struct fb_info *info,
+                       struct vm_area_struct *vma)
+{
+       struct drm_fb_helper *helper = info->par;
+       struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+       struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+       struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+       unsigned long vm_size;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+       vm_size = vma->vm_end - vma->vm_start;
+
+       if (vm_size > buffer->size)
+               return -EINVAL;
+
+       ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
+               buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+       if (ret < 0) {
+               DRM_ERROR("failed to mmap.\n");
+               return ret;
+       }
+
+       return 0;
+}
+
 static struct fb_ops exynos_drm_fb_ops = {
        .owner          = THIS_MODULE,
+       .fb_mmap        = exynos_drm_fb_mmap,
        .fb_fillrect    = cfb_fillrect,
        .fb_copyarea    = cfb_copyarea,
        .fb_imageblit   = cfb_imageblit,
@@ -79,6 +109,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
                return -EFAULT;
        }
 
+       /* map pages with kernel virtual space. */
+       if (!buffer->kvaddr) {
+               unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+               buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+                                       pgprot_writecombine(PAGE_KERNEL));
+               if (!buffer->kvaddr) {
+                       DRM_ERROR("failed to map pages to kernel space.\n");
+                       return -EIO;
+               }
+       }
+
        /* buffer count to framebuffer always is 1 at booting time. */
        exynos_drm_fb_set_buf_cnt(fb, 1);
 
@@ -87,7 +128,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 
        dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
        fbi->screen_base = buffer->kvaddr + offset;
-       fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
+       fbi->fix.smem_start = (unsigned long)
+                       (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
        fbi->screen_size = size;
        fbi->fix.smem_len = size;
 
@@ -133,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
        exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
        if (IS_ERR(exynos_gem_obj)) {
                ret = PTR_ERR(exynos_gem_obj);
-               goto out;
+               goto err_release_framebuffer;
        }
 
        exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -143,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
        if (IS_ERR_OR_NULL(helper->fb)) {
                DRM_ERROR("failed to create drm framebuffer.\n");
                ret = PTR_ERR(helper->fb);
-               goto out;
+               goto err_destroy_gem;
        }
 
        helper->fbdev = fbi;
@@ -155,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
        ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
        if (ret) {
                DRM_ERROR("failed to allocate cmap.\n");
-               goto out;
+               goto err_destroy_framebuffer;
        }
 
        ret = exynos_drm_fbdev_update(helper, helper->fb);
-       if (ret < 0) {
-               fb_dealloc_cmap(&fbi->cmap);
-               goto out;
-       }
+       if (ret < 0)
+               goto err_dealloc_cmap;
+
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+
+err_dealloc_cmap:
+       fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+       drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+       exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+       framebuffer_release(fbi);
 
 /*
  * if failed, all resources allocated above would be released by
@@ -264,8 +316,13 @@ err_init:
 static void exynos_drm_fbdev_destroy(struct drm_device *dev,
                                      struct drm_fb_helper *fb_helper)
 {
+       struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+       struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
        struct drm_framebuffer *fb;
 
+       if (exynos_gem_obj->buffer->kvaddr)
+               vunmap(exynos_gem_obj->buffer->kvaddr);
+
        /* release drm framebuffer and real buffer */
        if (fb_helper->fb && fb_helper->fb->funcs) {
                fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644 (file)
index 0000000..61ea242
--- /dev/null
@@ -0,0 +1,2001 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-fimc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC stands for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from the memory.
+ * output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> FIMC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> FIMC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> FIMC H/W ----> FIMD.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size width, height.
+ * 4. added check_prepare api for right register.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define FIMC_MAX_DEVS  4
+#define FIMC_MAX_SRC   2
+#define FIMC_MAX_DST   32
+#define FIMC_SHFACTOR  10
+#define FIMC_BUF_STOP  1
+#define FIMC_BUF_START 2
+#define FIMC_REG_SZ            32
+#define FIMC_WIDTH_ITU_709     1280
+#define FIMC_REFRESH_MAX       60
+#define FIMC_REFRESH_MIN       12
+#define FIMC_CROP_MAX  8192
+#define FIMC_CROP_MIN  32
+#define FIMC_SCALE_MAX 4224
+#define FIMC_SCALE_MIN 32
+
+#define get_fimc_context(dev)  platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)    container_of(ippdrv,\
+                                       struct fimc_context, ippdrv);
+#define fimc_read(offset)              readl(ctx->regs + (offset))
+#define fimc_write(cfg, offset)        writel(cfg, ctx->regs + (offset))
+
+enum fimc_wb {
+       FIMC_WB_NONE,
+       FIMC_WB_A,
+       FIMC_WB_B,
+};
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: unused scaler path.
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+       bool    range;
+       bool bypass;
+       bool up_h;
+       bool up_v;
+       u32 hratio;
+       u32 vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * find user manual table 43-1.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height without output rotation.
+ */
+struct fimc_capability {
+       /* scaler */
+       u32     in_hori;
+       u32     bypass;
+       /* output rotator */
+       u32     dst_h_wo_rot;
+       u32     dst_h_rot;
+       /* input rotator */
+       u32     rl_w_wo_rot;
+       u32     rl_h_rot;
+};
+
+/*
+ * A structure of fimc driver data.
+ *
+ * @parent_clk: name of parent clock.
+ */
+struct fimc_driverdata {
+       char    *parent_clk;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @sclk_fimc_clk: fimc source clock.
+ * @fimc_clk: fimc clock.
+ * @wb_clk: writeback A clock.
+ * @wb_b_clk: writeback b clock.
+ * @sc: scaler information.
+ * @odr: ordering of YUV.
+ * @ver: fimc version.
+ * @pol: polarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: qos operations.
+ */
+struct fimc_context {
+       struct exynos_drm_ippdrv        ippdrv;
+       struct resource *regs_res;
+       void __iomem    *regs;
+       struct mutex    lock;
+       struct clk      *sclk_fimc_clk;
+       struct clk      *fimc_clk;
+       struct clk      *wb_clk;
+       struct clk      *wb_b_clk;
+       struct fimc_scaler      sc;
+       struct fimc_driverdata  *ddata;
+       struct exynos_drm_ipp_pol       pol;
+       int     id;
+       int     irq;
+       bool    suspended;
+};
+
+static void fimc_sw_reset(struct fimc_context *ctx, bool pattern)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:pattern[%d]\n", __func__, pattern);
+
+       cfg = fimc_read(EXYNOS_CISRCFMT);
+       cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
+       if (pattern)
+               cfg |= EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR;
+
+       fimc_write(cfg, EXYNOS_CISRCFMT);
+
+       /* s/w reset */
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg |= (EXYNOS_CIGCTRL_SWRST);
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+
+       /* s/w reset complete */
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg &= ~EXYNOS_CIGCTRL_SWRST;
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+
+       /* reset sequence */
+       fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+       u32 camblk_cfg;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       camblk_cfg = readl(SYSREG_CAMERA_BLK);
+       camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
+       camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
+
+       writel(camblk_cfg, SYSREG_CAMERA_BLK);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+               EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+               EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+               EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+               EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+               EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+       switch (wb) {
+       case FIMC_WB_A:
+               cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+                       EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+               break;
+       case FIMC_WB_B:
+               cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+                       EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+               break;
+       case FIMC_WB_NONE:
+       default:
+               cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+                       EXYNOS_CIGCTRL_SELWRITEBACK_A |
+                       EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+                       EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+               break;
+       }
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+               struct exynos_drm_ipp_pol *pol)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
+               __func__, pol->inv_pclk, pol->inv_vsync);
+       DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
+               __func__, pol->inv_href, pol->inv_hsync);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+                EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+       if (pol->inv_pclk)
+               cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+       if (pol->inv_vsync)
+               cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+       if (pol->inv_href)
+               cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+       if (pol->inv_hsync)
+               cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       if (enable)
+               cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+       else
+               cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
+               bool overflow, bool level)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+                       enable, overflow, level);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       if (enable) {
+               cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
+               cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
+               if (overflow)
+                       cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
+               if (level)
+                       cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+       } else
+               cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg, status, flag;
+
+       status = fimc_read(EXYNOS_CISTATUS);
+       flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+               EXYNOS_CISTATUS_OVFICR;
+
+       DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
+
+       if (status & flag) {
+               cfg = fimc_read(EXYNOS_CIWDOFST);
+               cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+                       EXYNOS_CIWDOFST_CLROVFICR);
+
+               fimc_write(cfg, EXYNOS_CIWDOFST);
+
+               cfg = fimc_read(EXYNOS_CIWDOFST);
+               cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+                       EXYNOS_CIWDOFST_CLROVFICR);
+
+               fimc_write(cfg, EXYNOS_CIWDOFST);
+
+               dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+                       ctx->id, status);
+               return true;
+       }
+
+       return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+       u32 cfg;
+
+       cfg = fimc_read(EXYNOS_CISTATUS);
+
+       DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
+
+       if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+               return false;
+
+       cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+       fimc_write(cfg, EXYNOS_CISTATUS);
+
+       return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+       u32 cfg;
+       int frame_cnt, buf_id;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       cfg = fimc_read(EXYNOS_CISTATUS2);
+       frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+       if (frame_cnt == 0)
+               frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+       DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
+               EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+               EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+       if (frame_cnt == 0) {
+               DRM_ERROR("failed to get frame count.\n");
+               return -EIO;
+       }
+
+       buf_id = frame_cnt - 1;
+       DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+       return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       cfg = fimc_read(EXYNOS_CIOCTRL);
+       if (enable)
+               cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+       else
+               cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+       fimc_write(cfg, EXYNOS_CIOCTRL);
+}
+
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       /* RGB */
+       cfg = fimc_read(EXYNOS_CISCCTRL);
+       cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+               cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               return 0;
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_XRGB8888:
+               cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               return 0;
+       default:
+               /* bypass */
+               break;
+       }
+
+       /* YUV */
+       cfg = fimc_read(EXYNOS_MSCTRL);
+       cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+               EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+               EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+       switch (fmt) {
+       case DRM_FORMAT_YUYV:
+               cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+               break;
+       case DRM_FORMAT_YVYU:
+               cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+               break;
+       case DRM_FORMAT_UYVY:
+               cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+               break;
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_YUV444:
+               cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+               break;
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+               cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+                       EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+               break;
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+               break;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV12MT:
+       case DRM_FORMAT_NV16:
+               cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+                       EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg, EXYNOS_MSCTRL);
+
+       return 0;
+}
+
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       cfg = fimc_read(EXYNOS_MSCTRL);
+       cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_XRGB8888:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+               break;
+       case DRM_FORMAT_YUV444:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+               break;
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+               break;
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_YUV422:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+               break;
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV12MT:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg, EXYNOS_MSCTRL);
+
+       cfg = fimc_read(EXYNOS_CIDMAPARAM);
+       cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+       if (fmt == DRM_FORMAT_NV12MT)
+               cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+       else
+               cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+       fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+       return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_src_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg1, cfg2;
+
+       DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+               degree, flip);
+
+       cfg1 = fimc_read(EXYNOS_MSCTRL);
+       cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+               EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+       cfg2 = fimc_read(EXYNOS_CITRGFMT);
+       cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_0:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_90:
+               cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+                       EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+                       EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+               cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg1, EXYNOS_MSCTRL);
+       fimc_write(cfg2, EXYNOS_CITRGFMT);
+       *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+
+       return 0;
+}
+
+static int fimc_set_window(struct fimc_context *ctx,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       u32 cfg, h1, h2, v1, v2;
+
+       /* cropped image */
+       h1 = pos->x;
+       h2 = sz->hsize - pos->w - pos->x;
+       v1 = pos->y;
+       v2 = sz->vsize - pos->h - pos->y;
+
+       DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+       __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+       DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
+               h1, h2, v1, v2);
+
+       /*
+        * set window offset 1, 2 size
+        * check figure 43-21 in user manual
+        */
+       cfg = fimc_read(EXYNOS_CIWDOFST);
+       cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+               EXYNOS_CIWDOFST_WINVEROFST_MASK);
+       cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+               EXYNOS_CIWDOFST_WINVEROFST(v1));
+       cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+       fimc_write(cfg, EXYNOS_CIWDOFST);
+
+       cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+               EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+       fimc_write(cfg, EXYNOS_CIWDOFST2);
+
+       return 0;
+}
+
+static int fimc_src_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct drm_exynos_pos img_pos = *pos;
+       struct drm_exynos_sz img_sz = *sz;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+               __func__, swap, sz->hsize, sz->vsize);
+
+       /* original size */
+       cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+               EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+       fimc_write(cfg, EXYNOS_ORGISIZE);
+
+       DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
+               pos->x, pos->y, pos->w, pos->h);
+
+       if (swap) {
+               img_pos.w = pos->h;
+               img_pos.h = pos->w;
+               img_sz.hsize = sz->vsize;
+               img_sz.vsize = sz->hsize;
+       }
+
+       /* set input DMA image size */
+       cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+       cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+               EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+       cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
+               EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+       fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+
+       /*
+        * set input FIFO image size
+        * for now, we support only ITU601 8 bit mode
+        */
+       cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+               EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+               EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+       fimc_write(cfg, EXYNOS_CISRCFMT);
+
+       /* offset Y(RGB), Cb, Cr */
+       cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIIYOFF);
+       cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIICBOFF);
+       cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIICROFF);
+
+       return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+static int fimc_src_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+       struct drm_exynos_ipp_config *config;
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EINVAL;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+               property->prop_id, buf_id, buf_type);
+
+       if (buf_id > FIMC_MAX_SRC) {
+               dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+               return -ENOMEM;
+       }
+
+       /* address register set */
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               config = &property->config[EXYNOS_DRM_OPS_SRC];
+               fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+                       EXYNOS_CIIYSA(buf_id));
+
+               if (config->fmt == DRM_FORMAT_YVU420) {
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                               EXYNOS_CIICBSA(buf_id));
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                               EXYNOS_CIICRSA(buf_id));
+               } else {
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                               EXYNOS_CIICBSA(buf_id));
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                               EXYNOS_CIICRSA(buf_id));
+               }
+               break;
+       case IPP_BUF_DEQUEUE:
+               fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
+               fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
+               fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       return 0;
+}
+
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+       .set_fmt = fimc_src_set_fmt,
+       .set_transf = fimc_src_set_transf,
+       .set_size = fimc_src_set_size,
+       .set_addr = fimc_src_set_addr,
+};
+
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       /* RGB */
+       cfg = fimc_read(EXYNOS_CISCCTRL);
+       cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+               cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               return 0;
+       case DRM_FORMAT_RGB888:
+               cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               return 0;
+       case DRM_FORMAT_XRGB8888:
+               cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+                       EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       /* YUV */
+       cfg = fimc_read(EXYNOS_CIOCTRL);
+       cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+               EXYNOS_CIOCTRL_ORDER422_MASK |
+               EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+       switch (fmt) {
+       case DRM_FORMAT_XRGB8888:
+               cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+               break;
+       case DRM_FORMAT_YUYV:
+               cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+               break;
+       case DRM_FORMAT_YVYU:
+               cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+               break;
+       case DRM_FORMAT_UYVY:
+               cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+               break;
+       case DRM_FORMAT_VYUY:
+               cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+               break;
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+               cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+               cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+               break;
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+               break;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV12MT:
+       case DRM_FORMAT_NV16:
+               cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+               cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg, EXYNOS_CIOCTRL);
+
+       return 0;
+}
+
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       cfg = fimc_read(EXYNOS_CIEXTEN);
+
+       if (fmt == DRM_FORMAT_AYUV) {
+               cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+               fimc_write(cfg, EXYNOS_CIEXTEN);
+       } else {
+               cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+               fimc_write(cfg, EXYNOS_CIEXTEN);
+
+               cfg = fimc_read(EXYNOS_CITRGFMT);
+               cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+               switch (fmt) {
+               case DRM_FORMAT_RGB565:
+               case DRM_FORMAT_RGB888:
+               case DRM_FORMAT_XRGB8888:
+                       cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+                       break;
+               case DRM_FORMAT_YUYV:
+               case DRM_FORMAT_YVYU:
+               case DRM_FORMAT_UYVY:
+               case DRM_FORMAT_VYUY:
+                       cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+                       break;
+               case DRM_FORMAT_NV16:
+               case DRM_FORMAT_NV61:
+               case DRM_FORMAT_YUV422:
+                       cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+                       break;
+               case DRM_FORMAT_YUV420:
+               case DRM_FORMAT_YVU420:
+               case DRM_FORMAT_NV12:
+               case DRM_FORMAT_NV12MT:
+               case DRM_FORMAT_NV21:
+                       cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+                       break;
+               default:
+                       dev_err(ippdrv->dev, "inavlid target format 0x%x.\n",
+                               fmt);
+                       return -EINVAL;
+               }
+
+               fimc_write(cfg, EXYNOS_CITRGFMT);
+       }
+
+       cfg = fimc_read(EXYNOS_CIDMAPARAM);
+       cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
+       if (fmt == DRM_FORMAT_NV12MT)
+               cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+       else
+               cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+       fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+       return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_dst_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+               degree, flip);
+
+       cfg = fimc_read(EXYNOS_CITRGFMT);
+       cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+       cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_0:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_90:
+               cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+                       EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+                       EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+                       EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg, EXYNOS_CITRGFMT);
+       *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+
+       return 0;
+}
+
+static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+       DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+       if (src >= dst * 64) {
+               DRM_ERROR("failed to make ratio and shift.\n");
+               return -EINVAL;
+       } else if (src >= dst * 32) {
+               *ratio = 32;
+               *shift = 5;
+       } else if (src >= dst * 16) {
+               *ratio = 16;
+               *shift = 4;
+       } else if (src >= dst * 8) {
+               *ratio = 8;
+               *shift = 3;
+       } else if (src >= dst * 4) {
+               *ratio = 4;
+               *shift = 2;
+       } else if (src >= dst * 2) {
+               *ratio = 2;
+               *shift = 1;
+       } else {
+               *ratio = 1;
+               *shift = 0;
+       }
+
+       return 0;
+}
+
/*
 * Program the FIMC pre-scaler for a src -> dst conversion and fill @sc
 * with the main-scaler ratios derived from the same geometry.
 *
 * Returns 0 on success or a negative errno when the requested scale
 * factor is outside the pre-scaler's range.
 */
static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
{
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	u32 cfg, cfg_ext, shfactor;
	u32 pre_dst_width, pre_dst_height;
	u32 pre_hratio, hfactor, pre_vratio, vfactor;
	int ret = 0;
	u32 src_w, src_h, dst_w, dst_h;

	/* a 90-degree input rotation swaps the effective source axes */
	cfg_ext = fimc_read(EXYNOS_CITRGFMT);
	if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
		src_w = src->h;
		src_h = src->w;
	} else {
		src_w = src->w;
		src_h = src->h;
	}

	/* likewise for a 90-degree output rotation */
	if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
		dst_w = dst->h;
		dst_h = dst->w;
	} else {
		dst_w = dst->w;
		dst_h = dst->h;
	}

	ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
	if (ret) {
		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
		return ret;
	}

	ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
	if (ret) {
		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
		return ret;
	}

	/* intermediate image size after the pre-scaler stage */
	pre_dst_width = src_w / pre_hratio;
	pre_dst_height = src_h / pre_vratio;
	DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
		pre_dst_width, pre_dst_height);
	DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
		__func__, pre_hratio, hfactor, pre_vratio, vfactor);

	/* main scaler ratios in 14-bit fixed point, pre-scale factored out */
	sc->hratio = (src_w << 14) / (dst_w << hfactor);
	sc->vratio = (src_h << 14) / (dst_h << vfactor);
	sc->up_h = (dst_w >= src_w) ? true : false;
	sc->up_v = (dst_h >= src_h) ? true : false;
	DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
	__func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);

	shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
	DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);

	cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
		EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
		EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
	fimc_write(cfg, EXYNOS_CISCPRERATIO);

	cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
		EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
	fimc_write(cfg, EXYNOS_CISCPREDST);

	return ret;
}
+
/*
 * Program the main scaler from a previously computed fimc_scaler:
 * range/bypass/up-scale flags plus the split ratio fields (the upper
 * bits go into CISCCTRL, the low 6 bits into the CIEXTEN extension).
 */
static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
{
	u32 cfg, cfg_ext;

	DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
		__func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
	DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
		__func__, sc->hratio, sc->vratio);

	/* read-modify-write: clear every field we are about to set */
	cfg = fimc_read(EXYNOS_CISCCTRL);
	cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
		EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
		EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
		EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
		EXYNOS_CISCCTRL_CSCR2Y_WIDE |
		EXYNOS_CISCCTRL_CSCY2R_WIDE);

	/* wide (full-range) CSC in both directions when requested */
	if (sc->range)
		cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
			EXYNOS_CISCCTRL_CSCY2R_WIDE);
	if (sc->bypass)
		cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
	if (sc->up_h)
		cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
	if (sc->up_v)
		cfg |= EXYNOS_CISCCTRL_SCALEUP_V;

	/* main ratio: bits above the 6-bit extension live in CISCCTRL */
	cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
		EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
	fimc_write(cfg, EXYNOS_CISCCTRL);

	/* low-order ratio bits go into the extension register */
	cfg_ext = fimc_read(EXYNOS_CIEXTEN);
	cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
	cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
	cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
		EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
	fimc_write(cfg_ext, EXYNOS_CIEXTEN);
}
+
/*
 * Configure the destination (output DMA) geometry: original size,
 * CSC standard, target size/area and the Y/Cb/Cr plane offsets.
 * When @swap is set (90/270-degree rotation) width/height are exchanged
 * before being written to the hardware.
 *
 * Always returns 0.
 */
static int fimc_dst_set_size(struct device *dev, int swap,
		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct drm_exynos_pos img_pos = *pos;
	struct drm_exynos_sz img_sz = *sz;
	u32 cfg;

	DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
		__func__, swap, sz->hsize, sz->vsize);

	/* original size */
	cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
		EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));

	fimc_write(cfg, EXYNOS_ORGOSIZE);

	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
		__func__, pos->x, pos->y, pos->w, pos->h);

	/* CSC ITU: BT.709 for large frames, BT.601 otherwise */
	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;

	if (sz->hsize >= FIMC_WIDTH_ITU_709)
		cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
	else
		cfg |= EXYNOS_CIGCTRL_CSC_ITU601;

	fimc_write(cfg, EXYNOS_CIGCTRL);

	/* rotation by 90/270 degrees swaps the output axes */
	if (swap) {
		img_pos.w = pos->h;
		img_pos.h = pos->w;
		img_sz.hsize = sz->vsize;
		img_sz.vsize = sz->hsize;
	}

	/* target image size */
	cfg = fimc_read(EXYNOS_CITRGFMT);
	cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
		EXYNOS_CITRGFMT_TARGETV_MASK);
	cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
		EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
	fimc_write(cfg, EXYNOS_CITRGFMT);

	/* target area (pixel count) */
	cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
	fimc_write(cfg, EXYNOS_CITAREA);

	/* offset Y(RGB), Cb, Cr */
	cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
	fimc_write(cfg, EXYNOS_CIOYOFF);
	cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
	fimc_write(cfg, EXYNOS_CIOCBOFF);
	cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
	fimc_write(cfg, EXYNOS_CIOCROFF);

	return 0;
}
+
+static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+{
+       u32 cfg, i, buf_num = 0;
+       u32 mask = 0x00000001;
+
+       cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+       for (i = 0; i < FIMC_REG_SZ; i++)
+               if (cfg & (mask << i))
+                       buf_num++;
+
+       DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+       return buf_num;
+}
+
/*
 * Enable or disable one destination buffer slot in the capture
 * sequence mask and toggle the frame-end irq depending on how many
 * buffers remain queued. Serialized against itself via ctx->lock.
 *
 * Returns 0 on success, -EINVAL for an unknown buf_type.
 */
static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
		enum drm_exynos_ipp_buf_type buf_type)
{
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	bool enable;
	u32 cfg;
	u32 mask = 0x00000001 << buf_id;
	int ret = 0;

	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
		buf_id, buf_type);

	mutex_lock(&ctx->lock);

	/* mask register set */
	cfg = fimc_read(EXYNOS_CIFCNTSEQ);

	switch (buf_type) {
	case IPP_BUF_ENQUEUE:
		enable = true;
		break;
	case IPP_BUF_DEQUEUE:
		enable = false;
		break;
	default:
		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
		ret =  -EINVAL;
		goto err_unlock;
	}

	/* sequence id: set or clear this buffer's bit */
	cfg &= (~mask);
	cfg |= (enable << buf_id);
	fimc_write(cfg, EXYNOS_CIFCNTSEQ);

	/* interrupt enable: enough buffers queued to start capturing */
	if (buf_type == IPP_BUF_ENQUEUE &&
	    fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
		fimc_handle_irq(ctx, true, false, true);

	/* interrupt disable: queue drained below the stop threshold */
	if (buf_type == IPP_BUF_DEQUEUE &&
	    fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
		fimc_handle_irq(ctx, false, false, true);

err_unlock:
	mutex_unlock(&ctx->lock);
	return ret;
}
+
+static int fimc_dst_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+       struct drm_exynos_ipp_config *config;
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EINVAL;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+               property->prop_id, buf_id, buf_type);
+
+       if (buf_id > FIMC_MAX_DST) {
+               dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+               return -ENOMEM;
+       }
+
+       /* address register set */
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               config = &property->config[EXYNOS_DRM_OPS_DST];
+
+               fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+                       EXYNOS_CIOYSA(buf_id));
+
+               if (config->fmt == DRM_FORMAT_YVU420) {
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                               EXYNOS_CIOCBSA(buf_id));
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                               EXYNOS_CIOCRSA(buf_id));
+               } else {
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                               EXYNOS_CIOCBSA(buf_id));
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                               EXYNOS_CIOCRSA(buf_id));
+               }
+               break;
+       case IPP_BUF_DEQUEUE:
+               fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
+               fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
+               fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
/* ipp ops for the destination (output DMA) side of the FIMC block */
static struct exynos_drm_ipp_ops fimc_dst_ops = {
	.set_fmt = fimc_dst_set_fmt,
	.set_transf = fimc_dst_set_transf,
	.set_size = fimc_dst_set_size,
	.set_addr = fimc_dst_set_addr,
};
+
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       if (enable) {
+               clk_enable(ctx->sclk_fimc_clk);
+               clk_enable(ctx->fimc_clk);
+               clk_enable(ctx->wb_clk);
+               ctx->suspended = false;
+       } else {
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_disable(ctx->fimc_clk);
+               clk_disable(ctx->wb_clk);
+               ctx->suspended = true;
+       }
+
+       return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+       struct fimc_context *ctx = dev_id;
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_event_work *event_work =
+               c_node->event_work;
+       int buf_id;
+
+       DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
+
+       fimc_clear_irq(ctx);
+       if (fimc_check_ovf(ctx))
+               return IRQ_NONE;
+
+       if (!fimc_check_frame_end(ctx))
+               return IRQ_NONE;
+
+       buf_id = fimc_get_buf_id(ctx);
+       if (buf_id < 0)
+               return IRQ_HANDLED;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+       if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
+               DRM_ERROR("failed to dequeue.\n");
+               return IRQ_HANDLED;
+       }
+
+       event_work->ippdrv = ippdrv;
+       event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+       queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+
+       return IRQ_HANDLED;
+}
+
+static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+       struct drm_exynos_ipp_prop_list *prop_list;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+       if (!prop_list) {
+               DRM_ERROR("failed to alloc property list.\n");
+               return -ENOMEM;
+       }
+
+       prop_list->version = 1;
+       prop_list->writeback = 1;
+       prop_list->refresh_min = FIMC_REFRESH_MIN;
+       prop_list->refresh_max = FIMC_REFRESH_MAX;
+       prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
+                               (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+                               (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+       prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+                               (1 << EXYNOS_DRM_DEGREE_90) |
+                               (1 << EXYNOS_DRM_DEGREE_180) |
+                               (1 << EXYNOS_DRM_DEGREE_270);
+       prop_list->csc = 1;
+       prop_list->crop = 1;
+       prop_list->crop_max.hsize = FIMC_CROP_MAX;
+       prop_list->crop_max.vsize = FIMC_CROP_MAX;
+       prop_list->crop_min.hsize = FIMC_CROP_MIN;
+       prop_list->crop_min.vsize = FIMC_CROP_MIN;
+       prop_list->scale = 1;
+       prop_list->scale_max.hsize = FIMC_SCALE_MAX;
+       prop_list->scale_max.vsize = FIMC_SCALE_MAX;
+       prop_list->scale_min.hsize = FIMC_SCALE_MIN;
+       prop_list->scale_min.vsize = FIMC_SCALE_MIN;
+
+       ippdrv->prop_list = prop_list;
+
+       return 0;
+}
+
+static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
+{
+       switch (flip) {
+       case EXYNOS_DRM_FLIP_NONE:
+       case EXYNOS_DRM_FLIP_VERTICAL:
+       case EXYNOS_DRM_FLIP_HORIZONTAL:
+               return true;
+       default:
+               DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+               return false;
+       }
+}
+
/*
 * Validate a full ipp property (src and dst configs) against this
 * driver's advertised limits: flip, rotation, buffer bounds, crop
 * limits on the source and scale limits on the destination. For a
 * writeback command the source config comes from FIMD and is skipped.
 *
 * Returns 0 when valid, -EINVAL otherwise (after logging both configs).
 */
static int fimc_ippdrv_check_property(struct device *dev,
		struct drm_exynos_ipp_property *property)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
	struct drm_exynos_ipp_config *config;
	struct drm_exynos_pos *pos;
	struct drm_exynos_sz *sz;
	bool swap;
	int i;

	DRM_DEBUG_KMS("%s\n", __func__);

	for_each_ipp_ops(i) {
		/* writeback takes its source from FIMD, not from a config */
		if ((i == EXYNOS_DRM_OPS_SRC) &&
			(property->cmd == IPP_CMD_WB))
			continue;

		config = &property->config[i];
		pos = &config->pos;
		sz = &config->sz;

		/* check for flip */
		if (!fimc_check_drm_flip(config->flip)) {
			DRM_ERROR("invalid flip.\n");
			goto err_property;
		}

		/* check for degree; 90/270 swap the compared axes below */
		switch (config->degree) {
		case EXYNOS_DRM_DEGREE_90:
		case EXYNOS_DRM_DEGREE_270:
			swap = true;
			break;
		case EXYNOS_DRM_DEGREE_0:
		case EXYNOS_DRM_DEGREE_180:
			swap = false;
			break;
		default:
			DRM_ERROR("invalid degree.\n");
			goto err_property;
		}

		/* check for buffer bound: region must lie inside the buffer */
		if ((pos->x + pos->w > sz->hsize) ||
			(pos->y + pos->h > sz->vsize)) {
			DRM_ERROR("out of buf bound.\n");
			goto err_property;
		}

		/* check for crop (source side only) */
		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
			if (swap) {
				if ((pos->h < pp->crop_min.hsize) ||
					(sz->vsize > pp->crop_max.hsize) ||
					(pos->w < pp->crop_min.vsize) ||
					(sz->hsize > pp->crop_max.vsize)) {
					DRM_ERROR("out of crop size.\n");
					goto err_property;
				}
			} else {
				if ((pos->w < pp->crop_min.hsize) ||
					(sz->hsize > pp->crop_max.hsize) ||
					(pos->h < pp->crop_min.vsize) ||
					(sz->vsize > pp->crop_max.vsize)) {
					DRM_ERROR("out of crop size.\n");
					goto err_property;
				}
			}
		}

		/* check for scale (destination side only) */
		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
			if (swap) {
				if ((pos->h < pp->scale_min.hsize) ||
					(sz->vsize > pp->scale_max.hsize) ||
					(pos->w < pp->scale_min.vsize) ||
					(sz->hsize > pp->scale_max.vsize)) {
					DRM_ERROR("out of scale size.\n");
					goto err_property;
				}
			} else {
				if ((pos->w < pp->scale_min.hsize) ||
					(sz->hsize > pp->scale_max.hsize) ||
					(pos->h < pp->scale_min.vsize) ||
					(sz->vsize > pp->scale_max.vsize)) {
					DRM_ERROR("out of scale size.\n");
					goto err_property;
				}
			}
		}
	}

	return 0;

err_property:
	/* dump both configs to aid diagnosing the rejected property */
	for_each_ipp_ops(i) {
		if ((i == EXYNOS_DRM_OPS_SRC) &&
			(property->cmd == IPP_CMD_WB))
			continue;

		config = &property->config[i];
		pos = &config->pos;
		sz = &config->sz;

		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
			i ? "dst" : "src", config->flip, config->degree,
			pos->x, pos->y, pos->w, pos->h,
			sz->hsize, sz->vsize);
	}

	return -EINVAL;
}
+
+static void fimc_clear_addr(struct fimc_context *ctx)
+{
+       int i;
+
+       DRM_DEBUG_KMS("%s:\n", __func__);
+
+       for (i = 0; i < FIMC_MAX_SRC; i++) {
+               fimc_write(0, EXYNOS_CIIYSA(i));
+               fimc_write(0, EXYNOS_CIICBSA(i));
+               fimc_write(0, EXYNOS_CIICRSA(i));
+       }
+
+       for (i = 0; i < FIMC_MAX_DST; i++) {
+               fimc_write(0, EXYNOS_CIOYSA(i));
+               fimc_write(0, EXYNOS_CIOCBSA(i));
+               fimc_write(0, EXYNOS_CIOCRSA(i));
+       }
+}
+
+static int fimc_ippdrv_reset(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* reset h/w block */
+       fimc_sw_reset(ctx, false);
+
+       /* reset scaler capability */
+       memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+       fimc_clear_addr(ctx);
+
+       return 0;
+}
+
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+       struct drm_exynos_ipp_config *config;
+       struct drm_exynos_pos   img_pos[EXYNOS_DRM_OPS_MAX];
+       struct drm_exynos_ipp_set_wb set_wb;
+       int ret, i;
+       u32 cfg0, cfg1;
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EINVAL;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EINVAL;
+       }
+
+       fimc_handle_irq(ctx, true, false, true);
+
+       for_each_ipp_ops(i) {
+               config = &property->config[i];
+               img_pos[i] = config->pos;
+       }
+
+       ret = fimc_set_prescaler(ctx, &ctx->sc,
+               &img_pos[EXYNOS_DRM_OPS_SRC],
+               &img_pos[EXYNOS_DRM_OPS_DST]);
+       if (ret) {
+               dev_err(dev, "failed to set precalser.\n");
+               return ret;
+       }
+
+       /* If set ture, we can save jpeg about screen */
+       fimc_handle_jpeg(ctx, false);
+       fimc_set_scaler(ctx, &ctx->sc);
+       fimc_set_polarity(ctx, &ctx->pol);
+
+       switch (cmd) {
+       case IPP_CMD_M2M:
+               fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+               fimc_handle_lastend(ctx, false);
+
+               /* setup dma */
+               cfg0 = fimc_read(EXYNOS_MSCTRL);
+               cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+               cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+               fimc_write(cfg0, EXYNOS_MSCTRL);
+               break;
+       case IPP_CMD_WB:
+               fimc_set_type_ctrl(ctx, FIMC_WB_A);
+               fimc_handle_lastend(ctx, true);
+
+               /* setup FIMD */
+               fimc_set_camblk_fimd0_wb(ctx);
+
+               set_wb.enable = 1;
+               set_wb.refresh = property->refresh_rate;
+               exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+               break;
+       case IPP_CMD_OUTPUT:
+       default:
+               ret = -EINVAL;
+               dev_err(dev, "invalid operations.\n");
+               return ret;
+       }
+
+       /* Reset status */
+       fimc_write(0x0, EXYNOS_CISTATUS);
+
+       cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+       cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+       cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+       /* Scaler */
+       cfg1 = fimc_read(EXYNOS_CISCCTRL);
+       cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+       cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+               EXYNOS_CISCCTRL_SCALERSTART);
+
+       fimc_write(cfg1, EXYNOS_CISCCTRL);
+
+       /* Enable image capture*/
+       cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+       fimc_write(cfg0, EXYNOS_CIIMGCPT);
+
+       /* Disable frame end irq */
+       cfg0 = fimc_read(EXYNOS_CIGCTRL);
+       cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+       fimc_write(cfg0, EXYNOS_CIGCTRL);
+
+       cfg0 = fimc_read(EXYNOS_CIOCTRL);
+       cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
+       fimc_write(cfg0, EXYNOS_CIOCTRL);
+
+       if (cmd == IPP_CMD_M2M) {
+               cfg0 = fimc_read(EXYNOS_MSCTRL);
+               cfg0 |= EXYNOS_MSCTRL_ENVID;
+               fimc_write(cfg0, EXYNOS_MSCTRL);
+
+               cfg0 = fimc_read(EXYNOS_MSCTRL);
+               cfg0 |= EXYNOS_MSCTRL_ENVID;
+               fimc_write(cfg0, EXYNOS_MSCTRL);
+       }
+
+       return 0;
+}
+
/*
 * Stop an ipp command: tear down the input path for the given command,
 * mask interrupts, clear the buffer sequence and halt scaler/capture.
 */
static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
	u32 cfg;

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);

	switch (cmd) {
	case IPP_CMD_M2M:
		/* Source clear: drop memory input selection and ENVID */
		cfg = fimc_read(EXYNOS_MSCTRL);
		cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
		cfg &= ~EXYNOS_MSCTRL_ENVID;
		fimc_write(cfg, EXYNOS_MSCTRL);
		break;
	case IPP_CMD_WB:
		/* tell FIMD to stop feeding the writeback path */
		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
		break;
	case IPP_CMD_OUTPUT:
	default:
		dev_err(dev, "invalid operations.\n");
		break;
	}

	fimc_handle_irq(ctx, false, false, true);

	/* reset sequence */
	fimc_write(0x0, EXYNOS_CIFCNTSEQ);

	/* Scaler disable */
	cfg = fimc_read(EXYNOS_CISCCTRL);
	cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
	fimc_write(cfg, EXYNOS_CISCCTRL);

	/* Disable image capture */
	cfg = fimc_read(EXYNOS_CIIMGCPT);
	cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
	fimc_write(cfg, EXYNOS_CIIMGCPT);

	/* Enable frame end irq */
	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
	fimc_write(cfg, EXYNOS_CIGCTRL);
}
+
+static int __devinit fimc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct fimc_context *ctx;
+       struct clk      *parent_clk;
+       struct resource *res;
+       struct exynos_drm_ippdrv *ippdrv;
+       struct exynos_drm_fimc_pdata *pdata;
+       struct fimc_driverdata *ddata;
+       int ret;
+
+       pdata = pdev->dev.platform_data;
+       if (!pdata) {
+               dev_err(dev, "no platform data specified.\n");
+               return -EINVAL;
+       }
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ddata = (struct fimc_driverdata *)
+               platform_get_device_id(pdev)->driver_data;
+
+       /* clock control */
+       ctx->sclk_fimc_clk = clk_get(dev, "sclk_fimc");
+       if (IS_ERR(ctx->sclk_fimc_clk)) {
+               dev_err(dev, "failed to get src fimc clock.\n");
+               ret = PTR_ERR(ctx->sclk_fimc_clk);
+               goto err_ctx;
+       }
+       clk_enable(ctx->sclk_fimc_clk);
+
+       ctx->fimc_clk = clk_get(dev, "fimc");
+       if (IS_ERR(ctx->fimc_clk)) {
+               dev_err(dev, "failed to get fimc clock.\n");
+               ret = PTR_ERR(ctx->fimc_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               goto err_ctx;
+       }
+
+       ctx->wb_clk = clk_get(dev, "pxl_async0");
+       if (IS_ERR(ctx->wb_clk)) {
+               dev_err(dev, "failed to get writeback a clock.\n");
+               ret = PTR_ERR(ctx->wb_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               clk_put(ctx->fimc_clk);
+               goto err_ctx;
+       }
+
+       ctx->wb_b_clk = clk_get(dev, "pxl_async1");
+       if (IS_ERR(ctx->wb_b_clk)) {
+               dev_err(dev, "failed to get writeback b clock.\n");
+               ret = PTR_ERR(ctx->wb_b_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               clk_put(ctx->fimc_clk);
+               clk_put(ctx->wb_clk);
+               goto err_ctx;
+       }
+
+       parent_clk = clk_get(dev, ddata->parent_clk);
+
+       if (IS_ERR(parent_clk)) {
+               dev_err(dev, "failed to get parent clock.\n");
+               ret = PTR_ERR(parent_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               clk_put(ctx->fimc_clk);
+               clk_put(ctx->wb_clk);
+               clk_put(ctx->wb_b_clk);
+               goto err_ctx;
+       }
+
+       if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
+               dev_err(dev, "failed to set parent.\n");
+               ret = -EINVAL;
+               clk_put(parent_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               clk_put(ctx->fimc_clk);
+               clk_put(ctx->wb_clk);
+               clk_put(ctx->wb_b_clk);
+               goto err_ctx;
+       }
+
+       clk_put(parent_clk);
+       clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
+
+       /* resource memory */
+       ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!ctx->regs_res) {
+               dev_err(dev, "failed to find registers.\n");
+               ret = -ENOENT;
+               goto err_clk;
+       }
+
+       ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+       if (!ctx->regs) {
+               dev_err(dev, "failed to map registers.\n");
+               ret = -ENXIO;
+               goto err_clk;
+       }
+
+       /* resource irq */
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!res) {
+               dev_err(dev, "failed to request irq resource.\n");
+               ret = -ENOENT;
+               goto err_get_regs;
+       }
+
+       ctx->irq = res->start;
+       ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+               IRQF_ONESHOT, "drm_fimc", ctx);
+       if (ret < 0) {
+               dev_err(dev, "failed to request irq.\n");
+               goto err_get_regs;
+       }
+
+       /* context initailization */
+       ctx->id = pdev->id;
+       ctx->pol = pdata->pol;
+       ctx->ddata = ddata;
+
+       ippdrv = &ctx->ippdrv;
+       ippdrv->dev = dev;
+       ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+       ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+       ippdrv->check_property = fimc_ippdrv_check_property;
+       ippdrv->reset = fimc_ippdrv_reset;
+       ippdrv->start = fimc_ippdrv_start;
+       ippdrv->stop = fimc_ippdrv_stop;
+       ret = fimc_init_prop_list(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to init property list.\n");
+               goto err_get_irq;
+       }
+
+       DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+               (int)ippdrv);
+
+       mutex_init(&ctx->lock);
+       platform_set_drvdata(pdev, ctx);
+
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       ret = exynos_drm_ippdrv_register(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to register drm fimc device.\n");
+               goto err_ippdrv_register;
+       }
+
+       dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+
+       return 0;
+
+err_ippdrv_register:
+       devm_kfree(dev, ippdrv->prop_list);
+       pm_runtime_disable(dev);
+err_get_irq:
+       free_irq(ctx->irq, ctx);
+err_get_regs:
+       devm_iounmap(dev, ctx->regs);
+err_clk:
+       clk_put(ctx->sclk_fimc_clk);
+       clk_put(ctx->fimc_clk);
+       clk_put(ctx->wb_clk);
+       clk_put(ctx->wb_b_clk);
+err_ctx:
+       devm_kfree(dev, ctx);
+       return ret;
+}
+
+static int __devexit fimc_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+       devm_kfree(dev, ippdrv->prop_list);
+       exynos_drm_ippdrv_unregister(ippdrv);
+       mutex_destroy(&ctx->lock);
+
+       pm_runtime_set_suspended(dev);
+       pm_runtime_disable(dev);
+
+       free_irq(ctx->irq, ctx);
+       devm_iounmap(dev, ctx->regs);
+
+       clk_put(ctx->sclk_fimc_clk);
+       clk_put(ctx->fimc_clk);
+       clk_put(ctx->wb_clk);
+       clk_put(ctx->wb_b_clk);
+
+       devm_kfree(dev, ctx);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_suspend(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_resume(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       if (!pm_runtime_suspended(dev))
+               return fimc_clk_ctrl(ctx, true);
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_suspend(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       return  fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       return  fimc_clk_ctrl(ctx, true);
+}
+#endif
+
/* per-SoC data: name of the parent clock to feed sclk_fimc */
static struct fimc_driverdata exynos4210_fimc_data = {
	.parent_clk = "mout_mpll",
};

static struct fimc_driverdata exynos4410_fimc_data = {
	.parent_clk = "mout_mpll_user",
};

/* match table; driver_data carries the fimc_driverdata above */
static struct platform_device_id fimc_driver_ids[] = {
	{
		.name		= "exynos4210-fimc",
		.driver_data	= (unsigned long)&exynos4210_fimc_data,
	}, {
		.name		= "exynos4412-fimc",
		.driver_data	= (unsigned long)&exynos4410_fimc_data,
	},
	{},
};
MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
/* system-sleep and runtime PM hooks defined above */
static const struct dev_pm_ops fimc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
	SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
};

/* platform driver; non-static: registered by the exynos drm core */
struct platform_driver fimc_driver = {
	.probe		= fimc_probe,
	.remove		= __devexit_p(fimc_remove),
	.id_table	= fimc_driver_ids,
	.driver		= {
		.name	= "exynos-drm-fimc",
		.owner	= THIS_MODULE,
		.pm	= &fimc_pm_ops,
	},
};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644 (file)
index 0000000..dc970fa
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
index 130a2b5..bf0d9ba 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 
 #include <video/samsung_fimd.h>
@@ -25,6 +26,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fbdev.h"
 #include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
 
 /*
  * FIMD is stand for Fully Interactive Mobile Display and
@@ -61,11 +63,11 @@ struct fimd_driver_data {
        unsigned int timing_base;
 };
 
-struct fimd_driver_data exynos4_fimd_driver_data = {
+static struct fimd_driver_data exynos4_fimd_driver_data = {
        .timing_base = 0x0,
 };
 
-struct fimd_driver_data exynos5_fimd_driver_data = {
+static struct fimd_driver_data exynos5_fimd_driver_data = {
        .timing_base = 0x20000,
 };
 
@@ -78,10 +80,10 @@ struct fimd_win_data {
        unsigned int            fb_height;
        unsigned int            bpp;
        dma_addr_t              dma_addr;
-       void __iomem            *vaddr;
        unsigned int            buf_offsize;
        unsigned int            line_size;      /* bytes */
        bool                    enabled;
+       bool                    resume;
 };
 
 struct fimd_context {
@@ -99,13 +101,34 @@ struct fimd_context {
        u32                             vidcon1;
        bool                            suspended;
        struct mutex                    lock;
+       wait_queue_head_t               wait_vsync_queue;
+       atomic_t                        wait_vsync_event;
 
        struct exynos_drm_panel_info *panel;
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id fimd_driver_dt_match[] = {
+       { .compatible = "samsung,exynos4-fimd",
+         .data = &exynos4_fimd_driver_data },
+       { .compatible = "samsung,exynos5-fimd",
+         .data = &exynos5_fimd_driver_data },
+       {},
+};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+#endif
+
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
        struct platform_device *pdev)
 {
+#ifdef CONFIG_OF
+       const struct of_device_id *of_id =
+                       of_match_device(fimd_driver_dt_match, &pdev->dev);
+
+       if (of_id)
+               return (struct fimd_driver_data *)of_id->data;
+#endif
+
        return (struct fimd_driver_data *)
                platform_get_device_id(pdev)->driver_data;
 }
@@ -240,7 +263,9 @@ static void fimd_commit(struct device *dev)
 
        /* setup horizontal and vertical display size. */
        val = VIDTCON2_LINEVAL(timing->yres - 1) |
-              VIDTCON2_HOZVAL(timing->xres - 1);
+              VIDTCON2_HOZVAL(timing->xres - 1) |
+              VIDTCON2_LINEVAL_E(timing->yres - 1) |
+              VIDTCON2_HOZVAL_E(timing->xres - 1);
        writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
 
        /* setup clock source, clock divider, enable dma. */
@@ -307,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
        }
 }
 
+static void fimd_wait_for_vblank(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+
+       if (ctx->suspended)
+               return;
+
+       atomic_set(&ctx->wait_vsync_event, 1);
+
+       /*
+        * wait for FIMD to signal VSYNC interrupt or return after
+        * timeout which is set to 50ms (refresh rate of 20).
+        */
+       if (!wait_event_timeout(ctx->wait_vsync_queue,
+                               !atomic_read(&ctx->wait_vsync_event),
+                               DRM_HZ/20))
+               DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
 static struct exynos_drm_manager_ops fimd_manager_ops = {
        .dpms = fimd_dpms,
        .apply = fimd_apply,
        .commit = fimd_commit,
        .enable_vblank = fimd_enable_vblank,
        .disable_vblank = fimd_disable_vblank,
+       .wait_for_vblank = fimd_wait_for_vblank,
 };
 
 static void fimd_win_mode_set(struct device *dev,
@@ -351,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
        win_data->fb_width = overlay->fb_width;
        win_data->fb_height = overlay->fb_height;
        win_data->dma_addr = overlay->dma_addr[0] + offset;
-       win_data->vaddr = overlay->vaddr[0] + offset;
        win_data->bpp = overlay->bpp;
        win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
                                (overlay->bpp >> 3);
@@ -361,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
                        win_data->offset_x, win_data->offset_y);
        DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
                        win_data->ovl_width, win_data->ovl_height);
-       DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
-                       (unsigned long)win_data->dma_addr,
-                       (unsigned long)win_data->vaddr);
+       DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
        DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
                        overlay->fb_width, overlay->crtc_width);
 }
@@ -451,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
        struct fimd_win_data *win_data;
        int win = zpos;
        unsigned long val, alpha, size;
+       unsigned int last_x;
+       unsigned int last_y;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -496,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)
 
        /* buffer size */
        val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
-               VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
+               VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
+               VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
+               VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
        writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
 
        /* OSD position */
        val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
-               VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
+               VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
+               VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
+               VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
        writel(val, ctx->regs + VIDOSD_A(win));
 
-       val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
-                                       win_data->ovl_width - 1) |
-               VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
-                                       win_data->ovl_height - 1);
+       last_x = win_data->offset_x + win_data->ovl_width;
+       if (last_x)
+               last_x--;
+       last_y = win_data->offset_y + win_data->ovl_height;
+       if (last_y)
+               last_y--;
+
+       val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
+               VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
+
        writel(val, ctx->regs + VIDOSD_B(win));
 
        DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
-                       win_data->offset_x, win_data->offset_y,
-                       win_data->offset_x + win_data->ovl_width - 1,
-                       win_data->offset_y + win_data->ovl_height - 1);
+                       win_data->offset_x, win_data->offset_y, last_x, last_y);
 
        /* hardware window 0 doesn't support alpha channel. */
        if (win != 0) {
@@ -573,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)
 
        win_data = &ctx->win_data[win];
 
+       if (ctx->suspended) {
+               /* do not resume this window*/
+               win_data->resume = false;
+               return;
+       }
+
        /* protect windows */
        val = readl(ctx->regs + SHADOWCON);
        val |= SHADOWCON_WINx_PROTECT(win);
@@ -592,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
        win_data->enabled = false;
 }
 
-static void fimd_wait_for_vblank(struct device *dev)
-{
-       struct fimd_context *ctx = get_fimd_context(dev);
-       int ret;
-
-       ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
-                                       VIDCON1_VSTATUS_VSYNC), 50);
-       if (ret < 0)
-               DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
 static struct exynos_drm_overlay_ops fimd_overlay_ops = {
        .mode_set = fimd_win_mode_set,
        .commit = fimd_win_commit,
        .disable = fimd_win_disable,
-       .wait_for_vblank = fimd_wait_for_vblank,
 };
 
 static struct exynos_drm_manager fimd_manager = {
@@ -623,7 +669,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
        struct drm_pending_vblank_event *e, *t;
        struct timeval now;
        unsigned long flags;
-       bool is_checked = false;
 
        spin_lock_irqsave(&drm_dev->event_lock, flags);
 
@@ -633,8 +678,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
                if (crtc != e->pipe)
                        continue;
 
-               is_checked = true;
-
                do_gettimeofday(&now);
                e->event.sequence = 0;
                e->event.tv_sec = now.tv_sec;
@@ -642,22 +685,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 
                list_move_tail(&e->base.link, &e->base.file_priv->event_list);
                wake_up_interruptible(&e->base.file_priv->event_wait);
-       }
-
-       if (is_checked) {
-               /*
-                * call drm_vblank_put only in case that drm_vblank_get was
-                * called.
-                */
-               if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-                       drm_vblank_put(drm_dev, crtc);
-
-               /*
-                * don't off vblank if vblank_disable_allowed is 1,
-                * because vblank would be off by timer handler.
-                */
-               if (!drm_dev->vblank_disable_allowed)
-                       drm_vblank_off(drm_dev, crtc);
+               drm_vblank_put(drm_dev, crtc);
        }
 
        spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -684,6 +712,11 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
        drm_handle_vblank(drm_dev, manager->pipe);
        fimd_finish_pageflip(drm_dev, manager->pipe);
 
+       /* set wait vsync event to zero and wake up queue. */
+       if (atomic_read(&ctx->wait_vsync_event)) {
+               atomic_set(&ctx->wait_vsync_event, 0);
+               DRM_WAKEUP(&ctx->wait_vsync_queue);
+       }
 out:
        return IRQ_HANDLED;
 }
@@ -709,6 +742,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
         */
        drm_dev->vblank_disable_allowed = 1;
 
+       /* attach this sub driver to iommu mapping if supported. */
+       if (is_drm_iommu_supported(drm_dev))
+               drm_iommu_attach_device(drm_dev, dev);
+
        return 0;
 }
 
@@ -716,7 +753,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 {
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       /* TODO. */
+       /* detach this sub driver from iommu mapping if supported. */
+       if (is_drm_iommu_supported(drm_dev))
+               drm_iommu_detach_device(drm_dev, dev);
 }
 
 static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -805,11 +844,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
        return 0;
 }
 
+static void fimd_window_suspend(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+       struct fimd_win_data *win_data;
+       int i;
+
+       for (i = 0; i < WINDOWS_NR; i++) {
+               win_data = &ctx->win_data[i];
+               win_data->resume = win_data->enabled;
+               fimd_win_disable(dev, i);
+       }
+       fimd_wait_for_vblank(dev);
+}
+
+static void fimd_window_resume(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+       struct fimd_win_data *win_data;
+       int i;
+
+       for (i = 0; i < WINDOWS_NR; i++) {
+               win_data = &ctx->win_data[i];
+               win_data->enabled = win_data->resume;
+               win_data->resume = false;
+       }
+}
+
 static int fimd_activate(struct fimd_context *ctx, bool enable)
 {
+       struct device *dev = ctx->subdrv.dev;
        if (enable) {
                int ret;
-               struct device *dev = ctx->subdrv.dev;
 
                ret = fimd_clock(ctx, true);
                if (ret < 0)
@@ -820,7 +886,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
                /* if vblank was enabled status, enable it again. */
                if (test_and_clear_bit(0, &ctx->irq_flags))
                        fimd_enable_vblank(dev);
+
+               fimd_window_resume(dev);
        } else {
+               fimd_window_suspend(dev);
+
                fimd_clock(ctx, false);
                ctx->suspended = true;
        }
@@ -857,18 +927,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
        if (!ctx)
                return -ENOMEM;
 
-       ctx->bus_clk = clk_get(dev, "fimd");
+       ctx->bus_clk = devm_clk_get(dev, "fimd");
        if (IS_ERR(ctx->bus_clk)) {
                dev_err(dev, "failed to get bus clock\n");
-               ret = PTR_ERR(ctx->bus_clk);
-               goto err_clk_get;
+               return PTR_ERR(ctx->bus_clk);
        }
 
-       ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+       ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
        if (IS_ERR(ctx->lcd_clk)) {
                dev_err(dev, "failed to get lcd clock\n");
-               ret = PTR_ERR(ctx->lcd_clk);
-               goto err_bus_clk;
+               return PTR_ERR(ctx->lcd_clk);
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +944,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
        ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
        if (!ctx->regs) {
                dev_err(dev, "failed to map registers\n");
-               ret = -ENXIO;
-               goto err_clk;
+               return -ENXIO;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res) {
                dev_err(dev, "irq request failed.\n");
-               goto err_clk;
+               return -ENXIO;
        }
 
        ctx->irq = res->start;
@@ -892,13 +959,15 @@ static int __devinit fimd_probe(struct platform_device *pdev)
                                                        0, "drm_fimd", ctx);
        if (ret) {
                dev_err(dev, "irq request failed.\n");
-               goto err_clk;
+               return ret;
        }
 
        ctx->vidcon0 = pdata->vidcon0;
        ctx->vidcon1 = pdata->vidcon1;
        ctx->default_win = pdata->default_win;
        ctx->panel = panel;
+       DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+       atomic_set(&ctx->wait_vsync_event, 0);
 
        subdrv = &ctx->subdrv;
 
@@ -926,17 +995,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
        exynos_drm_subdrv_register(subdrv);
 
        return 0;
-
-err_clk:
-       clk_disable(ctx->lcd_clk);
-       clk_put(ctx->lcd_clk);
-
-err_bus_clk:
-       clk_disable(ctx->bus_clk);
-       clk_put(ctx->bus_clk);
-
-err_clk_get:
-       return ret;
 }
 
 static int __devexit fimd_remove(struct platform_device *pdev)
@@ -960,9 +1018,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
 out:
        pm_runtime_disable(dev);
 
-       clk_put(ctx->lcd_clk);
-       clk_put(ctx->bus_clk);
-
        return 0;
 }
 
@@ -1056,5 +1111,6 @@ struct platform_driver fimd_driver = {
                .name   = "exynos4-fb",
                .owner  = THIS_MODULE,
                .pm     = &fimd_pm_ops,
+               .of_match_table = of_match_ptr(fimd_driver_dt_match),
        },
 };
index f7aab24..6ffa076 100644 (file)
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
 
 #define G2D_HW_MAJOR_VER               4
 #define G2D_HW_MINOR_VER               1
 #define G2D_CMDLIST_POOL_SIZE          (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
 #define G2D_CMDLIST_DATA_NUM           (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
 
+#define MAX_BUF_ADDR_NR                        6
+
+/* maximum buffer pool size of userptr is 64MB as default */
+#define MAX_POOL               (64 * 1024 * 1024)
+
+enum {
+       BUF_TYPE_GEM = 1,
+       BUF_TYPE_USERPTR,
+};
+
 /* cmdlist data structure */
 struct g2d_cmdlist {
-       u32     head;
-       u32     data[G2D_CMDLIST_DATA_NUM];
-       u32     last;   /* last data offset */
+       u32             head;
+       unsigned long   data[G2D_CMDLIST_DATA_NUM];
+       u32             last;   /* last data offset */
 };
 
 struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
        struct drm_exynos_g2d_event     event;
 };
 
-struct g2d_gem_node {
+struct g2d_cmdlist_userptr {
        struct list_head        list;
-       unsigned int            handle;
+       dma_addr_t              dma_addr;
+       unsigned long           userptr;
+       unsigned long           size;
+       struct page             **pages;
+       unsigned int            npages;
+       struct sg_table         *sgt;
+       struct vm_area_struct   *vma;
+       atomic_t                refcount;
+       bool                    in_pool;
+       bool                    out_of_list;
 };
 
 struct g2d_cmdlist_node {
        struct list_head        list;
        struct g2d_cmdlist      *cmdlist;
-       unsigned int            gem_nr;
+       unsigned int            map_nr;
+       unsigned long           handles[MAX_BUF_ADDR_NR];
+       unsigned int            obj_type[MAX_BUF_ADDR_NR];
        dma_addr_t              dma_addr;
 
        struct drm_exynos_pending_g2d_event     *event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
        struct list_head        list;
        struct list_head        run_cmdlist;
        struct list_head        event_list;
+       struct drm_file         *filp;
        pid_t                   pid;
        struct completion       complete;
        int                     async;
@@ -143,23 +168,33 @@ struct g2d_data {
        struct mutex                    cmdlist_mutex;
        dma_addr_t                      cmdlist_pool;
        void                            *cmdlist_pool_virt;
+       struct dma_attrs                cmdlist_dma_attrs;
 
        /* runqueue*/
        struct g2d_runqueue_node        *runqueue_node;
        struct list_head                runqueue;
        struct mutex                    runqueue_mutex;
        struct kmem_cache               *runqueue_slab;
+
+       unsigned long                   current_pool;
+       unsigned long                   max_pool;
 };
 
 static int g2d_init_cmdlist(struct g2d_data *g2d)
 {
        struct device *dev = g2d->dev;
        struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+       struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
        int nr;
        int ret;
 
-       g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
-                                               &g2d->cmdlist_pool, GFP_KERNEL);
+       init_dma_attrs(&g2d->cmdlist_dma_attrs);
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+
+       g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+                                               G2D_CMDLIST_POOL_SIZE,
+                                               &g2d->cmdlist_pool, GFP_KERNEL,
+                                               &g2d->cmdlist_dma_attrs);
        if (!g2d->cmdlist_pool_virt) {
                dev_err(dev, "failed to allocate dma memory\n");
                return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
        return 0;
 
 err:
-       dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
-                       g2d->cmdlist_pool);
+       dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+                       g2d->cmdlist_pool_virt,
+                       g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
        return ret;
 }
 
 static void g2d_fini_cmdlist(struct g2d_data *g2d)
 {
-       struct device *dev = g2d->dev;
+       struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
 
        kfree(g2d->cmdlist_node);
-       dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
-                       g2d->cmdlist_pool);
+       dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+                       g2d->cmdlist_pool_virt,
+                       g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
 }
 
 static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -245,62 +282,300 @@ add_to_list:
                list_add_tail(&node->event->base.link, &g2d_priv->event_list);
 }
 
-static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
-                              struct drm_file *file,
-                              struct g2d_cmdlist_node *node)
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+                                       unsigned long obj,
+                                       bool force)
 {
-       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct g2d_cmdlist_userptr *g2d_userptr =
+                                       (struct g2d_cmdlist_userptr *)obj;
+
+       if (!obj)
+               return;
+
+       if (force)
+               goto out;
+
+       atomic_dec(&g2d_userptr->refcount);
+
+       if (atomic_read(&g2d_userptr->refcount) > 0)
+               return;
+
+       if (g2d_userptr->in_pool)
+               return;
+
+out:
+       exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
+                                       DMA_BIDIRECTIONAL);
+
+       exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+                                       g2d_userptr->npages,
+                                       g2d_userptr->vma);
+
+       if (!g2d_userptr->out_of_list)
+               list_del_init(&g2d_userptr->list);
+
+       sg_free_table(g2d_userptr->sgt);
+       kfree(g2d_userptr->sgt);
+       g2d_userptr->sgt = NULL;
+
+       kfree(g2d_userptr->pages);
+       g2d_userptr->pages = NULL;
+       kfree(g2d_userptr);
+       g2d_userptr = NULL;
+}
+
+dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+                                       unsigned long userptr,
+                                       unsigned long size,
+                                       struct drm_file *filp,
+                                       unsigned long *obj)
+{
+       struct drm_exynos_file_private *file_priv = filp->driver_priv;
+       struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+       struct g2d_cmdlist_userptr *g2d_userptr;
+       struct g2d_data *g2d;
+       struct page **pages;
+       struct sg_table *sgt;
+       struct vm_area_struct *vma;
+       unsigned long start, end;
+       unsigned int npages, offset;
+       int ret;
+
+       if (!size) {
+               DRM_ERROR("invalid userptr size.\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       g2d = dev_get_drvdata(g2d_priv->dev);
+
+       /* check if userptr already exists in userptr_list. */
+       list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+               if (g2d_userptr->userptr == userptr) {
+                       /*
+                        * also check size because there could be same address
+                        * and different size.
+                        */
+                       if (g2d_userptr->size == size) {
+                               atomic_inc(&g2d_userptr->refcount);
+                               *obj = (unsigned long)g2d_userptr;
+
+                               return &g2d_userptr->dma_addr;
+                       }
+
+                       /*
+                        * at this moment, maybe g2d dma is accessing this
+                        * g2d_userptr memory region so just remove this
+                        * g2d_userptr object from userptr_list not to be
+                        * referred again and also except it the userptr
+                        * pool to be released after the dma access completion.
+                        */
+                       g2d_userptr->out_of_list = true;
+                       g2d_userptr->in_pool = false;
+                       list_del_init(&g2d_userptr->list);
+
+                       break;
+               }
+       }
+
+       g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
+       if (!g2d_userptr) {
+               DRM_ERROR("failed to allocate g2d_userptr.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       atomic_set(&g2d_userptr->refcount, 1);
+
+       start = userptr & PAGE_MASK;
+       offset = userptr & ~PAGE_MASK;
+       end = PAGE_ALIGN(userptr + size);
+       npages = (end - start) >> PAGE_SHIFT;
+       g2d_userptr->npages = npages;
+
+       pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
+       if (!pages) {
+               DRM_ERROR("failed to allocate pages.\n");
+               kfree(g2d_userptr);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       vma = find_vma(current->mm, userptr);
+       if (!vma) {
+               DRM_ERROR("failed to get vm region.\n");
+               ret = -EFAULT;
+               goto err_free_pages;
+       }
+
+       if (vma->vm_end < userptr + size) {
+               DRM_ERROR("vma is too small.\n");
+               ret = -EFAULT;
+               goto err_free_pages;
+       }
+
+       g2d_userptr->vma = exynos_gem_get_vma(vma);
+       if (!g2d_userptr->vma) {
+               DRM_ERROR("failed to copy vma.\n");
+               ret = -ENOMEM;
+               goto err_free_pages;
+       }
+
+       g2d_userptr->size = size;
+
+       ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
+                                               npages, pages, vma);
+       if (ret < 0) {
+               DRM_ERROR("failed to get user pages from userptr.\n");
+               goto err_put_vma;
+       }
+
+       g2d_userptr->pages = pages;
+
+       sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+       if (!sgt) {
+               DRM_ERROR("failed to allocate sg table.\n");
+               ret = -ENOMEM;
+               goto err_free_userptr;
+       }
+
+       ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
+                                       size, GFP_KERNEL);
+       if (ret < 0) {
+               DRM_ERROR("failed to get sgt from pages.\n");
+               goto err_free_sgt;
+       }
+
+       g2d_userptr->sgt = sgt;
+
+       ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
+                                               DMA_BIDIRECTIONAL);
+       if (ret < 0) {
+               DRM_ERROR("failed to map sgt with dma region.\n");
+               goto err_free_sgt;
+       }
+
+       g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
+       g2d_userptr->userptr = userptr;
+
+       list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+
+       if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
+               g2d->current_pool += npages << PAGE_SHIFT;
+               g2d_userptr->in_pool = true;
+       }
+
+       *obj = (unsigned long)g2d_userptr;
+
+       return &g2d_userptr->dma_addr;
+
+err_free_sgt:
+       sg_free_table(sgt);
+       kfree(sgt);
+       sgt = NULL;
+
+err_free_userptr:
+       exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+                                       g2d_userptr->npages,
+                                       g2d_userptr->vma);
+
+err_put_vma:
+       exynos_gem_put_vma(g2d_userptr->vma);
+
+err_free_pages:
+       kfree(pages);
+       kfree(g2d_userptr);
+       pages = NULL;
+       g2d_userptr = NULL;
+
+       return ERR_PTR(ret);
+}
+
+static void g2d_userptr_free_all(struct drm_device *drm_dev,
+                                       struct g2d_data *g2d,
+                                       struct drm_file *filp)
+{
+       struct drm_exynos_file_private *file_priv = filp->driver_priv;
        struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+       struct g2d_cmdlist_userptr *g2d_userptr, *n;
+
+       list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+               if (g2d_userptr->in_pool)
+                       g2d_userptr_put_dma_addr(drm_dev,
+                                               (unsigned long)g2d_userptr,
+                                               true);
+
+       g2d->current_pool = 0;
+}
+
+static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
+                               struct g2d_cmdlist_node *node,
+                               struct drm_device *drm_dev,
+                               struct drm_file *file)
+{
        struct g2d_cmdlist *cmdlist = node->cmdlist;
-       dma_addr_t *addr;
        int offset;
        int i;
 
-       for (i = 0; i < node->gem_nr; i++) {
-               struct g2d_gem_node *gem_node;
-
-               gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
-               if (!gem_node) {
-                       dev_err(g2d_priv->dev, "failed to allocate gem node\n");
-                       return -ENOMEM;
-               }
+       for (i = 0; i < node->map_nr; i++) {
+               unsigned long handle;
+               dma_addr_t *addr;
 
                offset = cmdlist->last - (i * 2 + 1);
-               gem_node->handle = cmdlist->data[offset];
-
-               addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
-                                                  file);
-               if (IS_ERR(addr)) {
-                       node->gem_nr = i;
-                       kfree(gem_node);
-                       return PTR_ERR(addr);
+               handle = cmdlist->data[offset];
+
+               if (node->obj_type[i] == BUF_TYPE_GEM) {
+                       addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+                                                               file);
+                       if (IS_ERR(addr)) {
+                               node->map_nr = i;
+                               return -EFAULT;
+                       }
+               } else {
+                       struct drm_exynos_g2d_userptr g2d_userptr;
+
+                       if (copy_from_user(&g2d_userptr, (void __user *)handle,
+                               sizeof(struct drm_exynos_g2d_userptr))) {
+                               node->map_nr = i;
+                               return -EFAULT;
+                       }
+
+                       addr = g2d_userptr_get_dma_addr(drm_dev,
+                                                       g2d_userptr.userptr,
+                                                       g2d_userptr.size,
+                                                       file,
+                                                       &handle);
+                       if (IS_ERR(addr)) {
+                               node->map_nr = i;
+                               return -EFAULT;
+                       }
                }
 
                cmdlist->data[offset] = *addr;
-               list_add_tail(&gem_node->list, &g2d_priv->gem_list);
-               g2d_priv->gem_nr++;
+               node->handles[i] = handle;
        }
 
        return 0;
 }
 
-static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
-                               struct drm_file *file,
-                               unsigned int nr)
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+                                 struct g2d_cmdlist_node *node,
+                                 struct drm_file *filp)
 {
-       struct drm_exynos_file_private *file_priv = file->driver_priv;
-       struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-       struct g2d_gem_node *node, *n;
+       struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+       int i;
 
-       list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
-               if (!nr)
-                       break;
+       for (i = 0; i < node->map_nr; i++) {
+               unsigned long handle = node->handles[i];
 
-               exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
-               list_del_init(&node->list);
-               kfree(node);
-               nr--;
+               if (node->obj_type[i] == BUF_TYPE_GEM)
+                       exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+                                                       filp);
+               else
+                       g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+                                                       false);
+
+               node->handles[i] = 0;
        }
+
+       node->map_nr = 0;
 }
 
 static void g2d_dma_start(struct g2d_data *g2d,
@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
 static void g2d_free_runqueue_node(struct g2d_data *g2d,
                                   struct g2d_runqueue_node *runqueue_node)
 {
+       struct g2d_cmdlist_node *node;
+
        if (!runqueue_node)
                return;
 
        mutex_lock(&g2d->cmdlist_mutex);
+       /*
+        * commands in run_cmdlist have been completed so unmap all gem
+        * objects in each command node so that they are unreferenced.
+        */
+       list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+               g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
        list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
        mutex_unlock(&g2d->cmdlist_mutex);
 
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+static int g2d_check_reg_offset(struct device *dev,
+                               struct g2d_cmdlist_node *node,
                                int nr, bool for_addr)
 {
+       struct g2d_cmdlist *cmdlist = node->cmdlist;
        int reg_offset;
        int index;
        int i;
 
        for (i = 0; i < nr; i++) {
                index = cmdlist->last - 2 * (i + 1);
+
+               if (for_addr) {
+                       /* check userptr buffer type. */
+                       reg_offset = (cmdlist->data[index] &
+                                       ~0x7fffffff) >> 31;
+                       if (reg_offset) {
+                               node->obj_type[i] = BUF_TYPE_USERPTR;
+                               cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+                       }
+               }
+
                reg_offset = cmdlist->data[index] & ~0xfffff000;
 
                if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
                case G2D_MSK_BASE_ADDR:
                        if (!for_addr)
                                goto err;
+
+                       if (node->obj_type[i] != BUF_TYPE_USERPTR)
+                               node->obj_type[i] = BUF_TYPE_GEM;
                        break;
                default:
                        if (for_addr)
@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
        return 0;
 
 err:
-       dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+       dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
        return -EINVAL;
 }
 
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        }
 
        /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
-       size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+       size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
        if (size > G2D_CMDLIST_DATA_NUM) {
                dev_err(dev, "cmdlist size is too big\n");
                ret = -EINVAL;
@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        }
        cmdlist->last += req->cmd_nr * 2;
 
-       ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+       ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
        if (ret < 0)
                goto err_free_event;
 
-       node->gem_nr = req->cmd_gem_nr;
-       if (req->cmd_gem_nr) {
-               struct drm_exynos_g2d_cmd *cmd_gem;
+       node->map_nr = req->cmd_buf_nr;
+       if (req->cmd_buf_nr) {
+               struct drm_exynos_g2d_cmd *cmd_buf;
 
-               cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+               cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
 
                if (copy_from_user(cmdlist->data + cmdlist->last,
-                                       (void __user *)cmd_gem,
-                                       sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+                                       (void __user *)cmd_buf,
+                                       sizeof(*cmd_buf) * req->cmd_buf_nr)) {
                        ret = -EFAULT;
                        goto err_free_event;
                }
-               cmdlist->last += req->cmd_gem_nr * 2;
+               cmdlist->last += req->cmd_buf_nr * 2;
 
-               ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+               ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
                if (ret < 0)
                        goto err_free_event;
 
-               ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+               ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
                if (ret < 0)
                        goto err_unmap;
        }
@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        return 0;
 
 err_unmap:
-       g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+       g2d_unmap_cmdlist_gem(g2d, node, file);
 err_free_event:
        if (node->event) {
                spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
 
        mutex_lock(&g2d->runqueue_mutex);
        runqueue_node->pid = current->pid;
+       runqueue_node->filp = file;
        list_add_tail(&runqueue_node->list, &g2d->runqueue);
        if (!g2d->runqueue_node)
                g2d_exec_runqueue(g2d);
@@ -696,6 +996,43 @@ out:
 }
 EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
 
+static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+       struct g2d_data *g2d;
+       int ret;
+
+       g2d = dev_get_drvdata(dev);
+       if (!g2d)
+               return -EFAULT;
+
+       /* allocate dma-aware cmdlist buffer. */
+       ret = g2d_init_cmdlist(g2d);
+       if (ret < 0) {
+               dev_err(dev, "cmdlist init failed\n");
+               return ret;
+       }
+
+       if (!is_drm_iommu_supported(drm_dev))
+               return 0;
+
+       ret = drm_iommu_attach_device(drm_dev, dev);
+       if (ret < 0) {
+               dev_err(dev, "failed to enable iommu.\n");
+               g2d_fini_cmdlist(g2d);
+       }
+
+       return ret;
+
+}
+
+static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+       if (!is_drm_iommu_supported(drm_dev))
+               return;
+
+       drm_iommu_detach_device(drm_dev, dev);
+}
+
 static int g2d_open(struct drm_device *drm_dev, struct device *dev,
                        struct drm_file *file)
 {
@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
 
        INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
        INIT_LIST_HEAD(&g2d_priv->event_list);
-       INIT_LIST_HEAD(&g2d_priv->gem_list);
+       INIT_LIST_HEAD(&g2d_priv->userptr_list);
 
        return 0;
 }
@@ -734,11 +1071,21 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
                return;
 
        mutex_lock(&g2d->cmdlist_mutex);
-       list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+       list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+               /*
+                * unmap all gem objects not completed.
+                *
+                * P.S. if the current process was terminated forcibly then
+                * there may be some commands in inuse_cmdlist so unmap
+                * them.
+                */
+               g2d_unmap_cmdlist_gem(g2d, node, file);
                list_move_tail(&node->list, &g2d->free_cmdlist);
+       }
        mutex_unlock(&g2d->cmdlist_mutex);
 
-       g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+       /* release all g2d_userptr in pool. */
+       g2d_userptr_free_all(drm_dev, g2d, file);
 
        kfree(file_priv->g2d_priv);
 }
@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
        mutex_init(&g2d->cmdlist_mutex);
        mutex_init(&g2d->runqueue_mutex);
 
-       ret = g2d_init_cmdlist(g2d);
-       if (ret < 0)
-               goto err_destroy_workqueue;
-
-       g2d->gate_clk = clk_get(dev, "fimg2d");
+       g2d->gate_clk = devm_clk_get(dev, "fimg2d");
        if (IS_ERR(g2d->gate_clk)) {
                dev_err(dev, "failed to get gate clock\n");
                ret = PTR_ERR(g2d->gate_clk);
-               goto err_fini_cmdlist;
+               goto err_destroy_workqueue;
        }
 
        pm_runtime_enable(dev);
@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
                goto err_put_clk;
        }
 
+       g2d->max_pool = MAX_POOL;
+
        platform_set_drvdata(pdev, g2d);
 
        subdrv = &g2d->subdrv;
        subdrv->dev = dev;
+       subdrv->probe = g2d_subdrv_probe;
+       subdrv->remove = g2d_subdrv_remove;
        subdrv->open = g2d_open;
        subdrv->close = g2d_close;
 
@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 
 err_put_clk:
        pm_runtime_disable(dev);
-       clk_put(g2d->gate_clk);
-err_fini_cmdlist:
-       g2d_fini_cmdlist(g2d);
 err_destroy_workqueue:
        destroy_workqueue(g2d->g2d_workq);
 err_destroy_slab:
@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
        }
 
        pm_runtime_disable(&pdev->dev);
-       clk_put(g2d->gate_clk);
 
        g2d_fini_cmdlist(g2d);
        destroy_workqueue(g2d->g2d_workq);
index d254556..d48183e 100644 (file)
@@ -83,157 +83,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
 
 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
 {
-       if (!IS_NONCONTIG_BUFFER(flags)) {
-               if (size >= SZ_1M)
-                       return roundup(size, SECTION_SIZE);
-               else if (size >= SZ_64K)
-                       return roundup(size, SZ_64K);
-               else
-                       goto out;
-       }
-out:
-       return roundup(size, PAGE_SIZE);
-}
-
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
-                                               gfp_t gfpmask)
-{
-       struct page *p, **pages;
-       int i, npages;
-
-       npages = obj->size >> PAGE_SHIFT;
-
-       pages = drm_malloc_ab(npages, sizeof(struct page *));
-       if (pages == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       for (i = 0; i < npages; i++) {
-               p = alloc_page(gfpmask);
-               if (IS_ERR(p))
-                       goto fail;
-               pages[i] = p;
-       }
-
-       return pages;
-
-fail:
-       while (--i)
-               __free_page(pages[i]);
-
-       drm_free_large(pages);
-       return ERR_CAST(p);
-}
-
-static void exynos_gem_put_pages(struct drm_gem_object *obj,
-                                       struct page **pages)
-{
-       int npages;
-
-       npages = obj->size >> PAGE_SHIFT;
-
-       while (--npages >= 0)
-               __free_page(pages[npages]);
+       /* TODO */
 
-       drm_free_large(pages);
+       return roundup(size, PAGE_SIZE);
 }
 
-static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma,
                                        unsigned long f_vaddr,
                                        pgoff_t page_offset)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+       struct scatterlist *sgl;
        unsigned long pfn;
+       int i;
 
-       if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-               if (!buf->pages)
-                       return -EINTR;
-
-               pfn = page_to_pfn(buf->pages[page_offset++]);
-       } else
-               pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
-
-       return vm_insert_mixed(vma, f_vaddr, pfn);
-}
+       if (!buf->sgt)
+               return -EINTR;
 
-static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
-{
-       struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-       struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-       struct scatterlist *sgl;
-       struct page **pages;
-       unsigned int npages, i = 0;
-       int ret;
-
-       if (buf->pages) {
-               DRM_DEBUG_KMS("already allocated.\n");
+       if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+               DRM_ERROR("invalid page offset\n");
                return -EINVAL;
        }
 
-       pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
-       if (IS_ERR(pages)) {
-               DRM_ERROR("failed to get pages.\n");
-               return PTR_ERR(pages);
-       }
-
-       npages = obj->size >> PAGE_SHIFT;
-       buf->page_size = PAGE_SIZE;
-
-       buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!buf->sgt) {
-               DRM_ERROR("failed to allocate sg table.\n");
-               ret = -ENOMEM;
-               goto err;
-       }
-
-       ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
-       if (ret < 0) {
-               DRM_ERROR("failed to initialize sg table.\n");
-               ret = -EFAULT;
-               goto err1;
-       }
-
        sgl = buf->sgt->sgl;
-
-       /* set all pages to sg list. */
-       while (i < npages) {
-               sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
-               sg_dma_address(sgl) = page_to_phys(pages[i]);
-               i++;
-               sgl = sg_next(sgl);
+       for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+               if (page_offset < (sgl->length >> PAGE_SHIFT))
+                       break;
+               page_offset -=  (sgl->length >> PAGE_SHIFT);
        }
 
-       /* add some codes for UNCACHED type here. TODO */
-
-       buf->pages = pages;
-       return ret;
-err1:
-       kfree(buf->sgt);
-       buf->sgt = NULL;
-err:
-       exynos_gem_put_pages(obj, pages);
-       return ret;
-
-}
-
-static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
-{
-       struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-       struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-
-       /*
-        * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
-        * allocated at gem fault handler.
-        */
-       sg_free_table(buf->sgt);
-       kfree(buf->sgt);
-       buf->sgt = NULL;
-
-       exynos_gem_put_pages(obj, buf->pages);
-       buf->pages = NULL;
+       pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
 
-       /* add some codes for UNCACHED type here. TODO */
+       return vm_insert_mixed(vma, f_vaddr, pfn);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -270,9 +153,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 
        DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
 
-       if (!buf->pages)
-               return;
-
        /*
         * do not release memory region from exporter.
         *
@@ -282,10 +162,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
        if (obj->import_attach)
                goto out;
 
-       if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
-               exynos_drm_gem_put_pages(obj);
-       else
-               exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+       exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
 
 out:
        exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +241,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;
 
-       /*
-        * allocate all pages as desired size if user wants to allocate
-        * physically non-continuous memory.
-        */
-       if (flags & EXYNOS_BO_NONCONTIG) {
-               ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
-               if (ret < 0) {
-                       drm_gem_object_release(&exynos_gem_obj->base);
-                       goto err_fini_buf;
-               }
-       } else {
-               ret = exynos_drm_alloc_buf(dev, buf, flags);
-               if (ret < 0) {
-                       drm_gem_object_release(&exynos_gem_obj->base);
-                       goto err_fini_buf;
-               }
+       ret = exynos_drm_alloc_buf(dev, buf, flags);
+       if (ret < 0) {
+               drm_gem_object_release(&exynos_gem_obj->base);
+               goto err_fini_buf;
        }
 
        return exynos_gem_obj;
@@ -412,14 +277,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
-                                       struct drm_file *file_priv)
+                                       struct drm_file *filp)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
 
-       obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+       obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
@@ -427,25 +292,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 
        exynos_gem_obj = to_exynos_gem_obj(obj);
 
-       if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-               DRM_DEBUG_KMS("not support NONCONTIG type.\n");
-               drm_gem_object_unreference_unlocked(obj);
-
-               /* TODO */
-               return ERR_PTR(-EINVAL);
-       }
-
        return &exynos_gem_obj->buffer->dma_addr;
 }
 
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
-                                       struct drm_file *file_priv)
+                                       struct drm_file *filp)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
 
-       obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+       obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
@@ -453,14 +310,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 
        exynos_gem_obj = to_exynos_gem_obj(obj);
 
-       if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-               DRM_DEBUG_KMS("not support NONCONTIG type.\n");
-               drm_gem_object_unreference_unlocked(obj);
-
-               /* TODO */
-               return;
-       }
-
        drm_gem_object_unreference_unlocked(obj);
 
        /*
@@ -489,22 +338,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                        &args->offset);
 }
 
+static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
+                                                       struct file *filp)
+{
+       struct drm_file *file_priv;
+
+       mutex_lock(&drm_dev->struct_mutex);
+
+       /* find current process's drm_file from filelist. */
+       list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
+               if (file_priv->filp == filp) {
+                       mutex_unlock(&drm_dev->struct_mutex);
+                       return file_priv;
+               }
+       }
+
+       mutex_unlock(&drm_dev->struct_mutex);
+       WARN_ON(1);
+
+       return ERR_PTR(-EFAULT);
+}
+
 static int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+       struct drm_device *drm_dev = obj->dev;
        struct exynos_drm_gem_buf *buffer;
-       unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+       struct drm_file *file_priv;
+       unsigned long vm_size;
        int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_private_data = obj;
+       vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+       /* restore it to driver's fops. */
+       filp->f_op = fops_get(drm_dev->driver->fops);
+
+       file_priv = exynos_drm_find_drm_file(drm_dev, filp);
+       if (IS_ERR(file_priv))
+               return PTR_ERR(file_priv);
+
+       /* restore it to drm_file. */
+       filp->private_data = file_priv;
 
        update_vm_cache_attr(exynos_gem_obj, vma);
 
-       vm_size = usize = vma->vm_end - vma->vm_start;
+       vm_size = vma->vm_end - vma->vm_start;
 
        /*
         * a buffer contains information to physically continuous memory
@@ -516,40 +400,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
        if (vm_size > buffer->size)
                return -EINVAL;
 
-       if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-               int i = 0;
-
-               if (!buffer->pages)
-                       return -EINVAL;
+       ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
+                               buffer->dma_addr, buffer->size,
+                               &buffer->dma_attrs);
+       if (ret < 0) {
+               DRM_ERROR("failed to mmap.\n");
+               return ret;
+       }
 
-               vma->vm_flags |= VM_MIXEDMAP;
+       /*
+        * take a reference to this mapping of the object. And this reference
+        * is unreferenced by the corresponding vm_close call.
+        */
+       drm_gem_object_reference(obj);
 
-               do {
-                       ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
-                       if (ret) {
-                               DRM_ERROR("failed to remap user space.\n");
-                               return ret;
-                       }
-
-                       uaddr += PAGE_SIZE;
-                       usize -= PAGE_SIZE;
-               } while (usize > 0);
-       } else {
-               /*
-                * get page frame number to physical memory to be mapped
-                * to user space.
-                */
-               pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
-                                                               PAGE_SHIFT;
-
-               DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
-               if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
-                                       vma->vm_page_prot)) {
-                       DRM_ERROR("failed to remap pfn range.\n");
-                       return -EAGAIN;
-               }
-       }
+       mutex_lock(&drm_dev->struct_mutex);
+       drm_vm_open_locked(drm_dev, vma);
+       mutex_unlock(&drm_dev->struct_mutex);
 
        return 0;
 }
@@ -578,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       obj->filp->f_op = &exynos_drm_gem_fops;
-       obj->filp->private_data = obj;
+       /*
+        * Set specific mapper's fops. And it will be restored by
+        * exynos_drm_gem_mmap_buffer to dev->driver->fops.
+        * This is used to call specific mapper temporarily.
+        */
+       file_priv->filp->f_op = &exynos_drm_gem_fops;
 
-       addr = vm_mmap(obj->filp, 0, args->size,
+       /*
+        * Set gem object to private_data so that specific mmaper
+        * can get the gem object. And it will be restored by
+        * exynos_drm_gem_mmap_buffer to drm_file.
+        */
+       file_priv->filp->private_data = obj;
+
+       addr = vm_mmap(file_priv->filp, 0, args->size,
                        PROT_READ | PROT_WRITE, MAP_SHARED, 0);
 
        drm_gem_object_unreference_unlocked(obj);
 
-       if (IS_ERR((void *)addr))
+       if (IS_ERR((void *)addr)) {
+               file_priv->filp->private_data = file_priv;
                return PTR_ERR((void *)addr);
+       }
 
        args->mapped = addr;
 
@@ -622,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+       struct vm_area_struct *vma_copy;
+
+       vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+       if (!vma_copy)
+               return NULL;
+
+       if (vma->vm_ops && vma->vm_ops->open)
+               vma->vm_ops->open(vma);
+
+       if (vma->vm_file)
+               get_file(vma->vm_file);
+
+       memcpy(vma_copy, vma, sizeof(*vma));
+
+       vma_copy->vm_mm = NULL;
+       vma_copy->vm_next = NULL;
+       vma_copy->vm_prev = NULL;
+
+       return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+       if (!vma)
+               return;
+
+       if (vma->vm_ops && vma->vm_ops->close)
+               vma->vm_ops->close(vma);
+
+       if (vma->vm_file)
+               fput(vma->vm_file);
+
+       kfree(vma);
+}
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+                                               unsigned int npages,
+                                               struct page **pages,
+                                               struct vm_area_struct *vma)
+{
+       int get_npages;
+
+       /* the memory region mmaped with VM_PFNMAP. */
+       if (vma_is_io(vma)) {
+               unsigned int i;
+
+               for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+                       unsigned long pfn;
+                       int ret = follow_pfn(vma, start, &pfn);
+                       if (ret)
+                               return ret;
+
+                       pages[i] = pfn_to_page(pfn);
+               }
+
+               if (i != npages) {
+                       DRM_ERROR("failed to get user_pages.\n");
+                       return -EINVAL;
+               }
+
+               return 0;
+       }
+
+       get_npages = get_user_pages(current, current->mm, start,
+                                       npages, 1, 1, pages, NULL);
+       get_npages = max(get_npages, 0);
+       if (get_npages != npages) {
+               DRM_ERROR("failed to get user_pages.\n");
+               while (get_npages)
+                       put_page(pages[--get_npages]);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+                                       unsigned int npages,
+                                       struct vm_area_struct *vma)
+{
+       if (!vma_is_io(vma)) {
+               unsigned int i;
+
+               for (i = 0; i < npages; i++) {
+                       set_page_dirty_lock(pages[i]);
+
+                       /*
+                        * undo the reference we took when populating
+                        * the table.
+                        */
+                       put_page(pages[i]);
+               }
+       }
+}
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+                               struct sg_table *sgt,
+                               enum dma_data_direction dir)
+{
+       int nents;
+
+       mutex_lock(&drm_dev->struct_mutex);
+
+       nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+       if (!nents) {
+               DRM_ERROR("failed to map sgl with dma.\n");
+               mutex_unlock(&drm_dev->struct_mutex);
+               return nents;
+       }
+
+       mutex_unlock(&drm_dev->struct_mutex);
+       return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+                               struct sg_table *sgt,
+                               enum dma_data_direction dir)
+{
+       dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
+
 int exynos_drm_gem_init_object(struct drm_gem_object *obj)
 {
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -753,9 +756,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        mutex_lock(&dev->struct_mutex);
 
-       ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+       ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
        if (ret < 0)
-               DRM_ERROR("failed to map pages.\n");
+               DRM_ERROR("failed to map a buffer with user.\n");
 
        mutex_unlock(&dev->struct_mutex);
 
index 085b2a5..f11f2af 100644 (file)
  * exynos drm gem buffer structure.
  *
  * @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *     - this address could be physical address without IOMMU and
  *     device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
+ * @pages: Array of backing pages.
  * @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
  * @size: size of allocated memory region.
+ * @pfnmap: indicate whether memory region from userptr is mmaped with
+ *     VM_PFNMAP or not.
  */
 struct exynos_drm_gem_buf {
        void __iomem            *kvaddr;
+       unsigned long           userptr;
        dma_addr_t              dma_addr;
-       struct sg_table         *sgt;
+       struct dma_attrs        dma_attrs;
+       unsigned int            write;
        struct page             **pages;
-       unsigned long           page_size;
+       struct sg_table         *sgt;
        unsigned long           size;
+       bool                    pfnmap;
 };
 
 /*
@@ -65,6 +71,7 @@ struct exynos_drm_gem_buf {
  *     or at framebuffer creation.
  * @size: size requested from user, in bytes and this size is aligned
  *     in page unit.
+ * @vma: a pointer to vm_area.
  * @flags: indicate memory type to allocated buffer and cache attruibute.
  *
  * P.S. this object would be transfered to user as kms_bo.handle so
@@ -74,6 +81,7 @@ struct exynos_drm_gem_obj {
        struct drm_gem_object           base;
        struct exynos_drm_gem_buf       *buffer;
        unsigned long                   size;
+       struct vm_area_struct           *vma;
        unsigned int                    flags;
 };
 
@@ -104,9 +112,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
  * other drivers such as 2d/3d acceleration drivers.
  * with this function call, gem object reference count would be increased.
  */
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
-                                       struct drm_file *file_priv);
+                                       struct drm_file *filp);
 
 /*
  * put dma address from gem handle and this function could be used for
@@ -115,7 +123,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
  */
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
-                                       struct drm_file *file_priv);
+                                       struct drm_file *filp);
 
 /* get buffer offset to map to user space. */
 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +136,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
 
+/* map user space allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+                                     struct drm_file *file_priv);
+
 /* get buffer information to memory region allocated by gem. */
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv);
@@ -163,4 +175,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+       return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+                                               unsigned int npages,
+                                               struct page **pages,
+                                               struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+                                       unsigned int npages,
+                                       struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+                               struct sg_table *sgt,
+                               enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+                               struct sg_table *sgt,
+                               enum dma_data_direction dir);
+
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644 (file)
index 0000000..5639353
--- /dev/null
@@ -0,0 +1,1870 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-gsc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_gsc.h"
+
+/*
+ * GSC stands for General SCaler and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from the memory.
+ * output DMA writes image data to memory.
+ * GSC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc so on.
+ * Memory ----> GSC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> GSC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> GSC H/W ----> FIMD, Mixer.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size width, height.
+ * 4. added check_prepare api for right register.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define GSC_MAX_DEVS   4
+#define GSC_MAX_SRC            4
+#define GSC_MAX_DST            16
+#define GSC_RESET_TIMEOUT      50
+#define GSC_BUF_STOP   1
+#define GSC_BUF_START  2
+#define GSC_REG_SZ             16
+#define GSC_WIDTH_ITU_709      1280
+#define GSC_SC_UP_MAX_RATIO            65536
+#define GSC_SC_DOWN_RATIO_7_8          74898
+#define GSC_SC_DOWN_RATIO_6_8          87381
+#define GSC_SC_DOWN_RATIO_5_8          104857
+#define GSC_SC_DOWN_RATIO_4_8          131072
+#define GSC_SC_DOWN_RATIO_3_8          174762
+#define GSC_SC_DOWN_RATIO_2_8          262144
+#define GSC_REFRESH_MIN        12
+#define GSC_REFRESH_MAX        60
+#define GSC_CROP_MAX   8192
+#define GSC_CROP_MIN   32
+#define GSC_SCALE_MAX  4224
+#define GSC_SCALE_MIN  32
+#define GSC_COEF_RATIO 7
+#define GSC_COEF_PHASE 9
+#define GSC_COEF_ATTR  16
+#define GSC_COEF_H_8T  8
+#define GSC_COEF_V_4T  4
+#define GSC_COEF_DEPTH 3
+
+#define get_gsc_context(dev)   platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)    container_of(ippdrv,\
+                                       struct gsc_context, ippdrv);
+#define gsc_read(offset)               readl(ctx->regs + (offset))
+#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @pre_shfactor: pre scaler shift factor.
+ * @pre_hratio: horizontal ratio of the prescaler.
+ * @pre_vratio: vertical ratio of the prescaler.
+ * @main_hratio: the main scaler's horizontal ratio.
+ * @main_vratio: the main scaler's vertical ratio.
+ */
+struct gsc_scaler {
+       bool    range;
+       u32     pre_shfactor;
+       u32     pre_hratio;
+       u32     pre_vratio;
+       unsigned long main_hratio;
+       unsigned long main_vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * find user manual 49.2 features.
+ * @tile_w: tile mode or rotation width.
+ * @tile_h: tile mode or rotation height.
+ * @w: other cases width.
+ * @h: other cases height.
+ */
+struct gsc_capability {
+       /* tile or rotation */
+       u32     tile_w;
+       u32     tile_h;
+       /* other cases */
+       u32     w;
+       u32     h;
+};
+
+/*
+ * A structure of gsc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @gsc_clk: gsc gate clock.
+ * @sc: scaler information.
+ * @id: gsc id.
+ * @irq: irq number.
+ * @rotation: supports rotation of src.
+ * @suspended: qos operations.
+ */
+struct gsc_context {
+       struct exynos_drm_ippdrv        ippdrv;
+       struct resource *regs_res;
+       void __iomem    *regs;
+       struct mutex    lock;
+       struct clk      *gsc_clk;
+       struct gsc_scaler       sc;
+       int     id;
+       int     irq;
+       bool    rotation;
+       bool    suspended;
+};
+
+/* 8-tap Filter Coefficient */
+static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
+       {       /* Ratio <= 65536 (~8:8) */
+               {  0,  0,   0, 128,   0,   0,  0,  0 },
+               { -1,  2,  -6, 127,   7,  -2,  1,  0 },
+               { -1,  4, -12, 125,  16,  -5,  1,  0 },
+               { -1,  5, -15, 120,  25,  -8,  2,  0 },
+               { -1,  6, -18, 114,  35, -10,  3, -1 },
+               { -1,  6, -20, 107,  46, -13,  4, -1 },
+               { -2,  7, -21,  99,  57, -16,  5, -1 },
+               { -1,  6, -20,  89,  68, -18,  5, -1 },
+               { -1,  6, -20,  79,  79, -20,  6, -1 },
+               { -1,  5, -18,  68,  89, -20,  6, -1 },
+               { -1,  5, -16,  57,  99, -21,  7, -2 },
+               { -1,  4, -13,  46, 107, -20,  6, -1 },
+               { -1,  3, -10,  35, 114, -18,  6, -1 },
+               {  0,  2,  -8,  25, 120, -15,  5, -1 },
+               {  0,  1,  -5,  16, 125, -12,  4, -1 },
+               {  0,  1,  -2,   7, 127,  -6,  2, -1 }
+       }, {    /* 65536 < Ratio <= 74898 (~8:7) */
+               {  3, -8,  14, 111,  13,  -8,  3,  0 },
+               {  2, -6,   7, 112,  21, -10,  3, -1 },
+               {  2, -4,   1, 110,  28, -12,  4, -1 },
+               {  1, -2,  -3, 106,  36, -13,  4, -1 },
+               {  1, -1,  -7, 103,  44, -15,  4, -1 },
+               {  1,  1, -11,  97,  53, -16,  4, -1 },
+               {  0,  2, -13,  91,  61, -16,  4, -1 },
+               {  0,  3, -15,  85,  69, -17,  4, -1 },
+               {  0,  3, -16,  77,  77, -16,  3,  0 },
+               { -1,  4, -17,  69,  85, -15,  3,  0 },
+               { -1,  4, -16,  61,  91, -13,  2,  0 },
+               { -1,  4, -16,  53,  97, -11,  1,  1 },
+               { -1,  4, -15,  44, 103,  -7, -1,  1 },
+               { -1,  4, -13,  36, 106,  -3, -2,  1 },
+               { -1,  4, -12,  28, 110,   1, -4,  2 },
+               { -1,  3, -10,  21, 112,   7, -6,  2 }
+       }, {    /* 74898 < Ratio <= 87381 (~8:6) */
+               { 2, -11,  25,  96, 25, -11,   2,  0 },
+               { 2, -10,  19,  96, 31, -12,   2,  0 },
+               { 2,  -9,  14,  94, 37, -12,   2,  0 },
+               { 2,  -8,  10,  92, 43, -12,   1,  0 },
+               { 2,  -7,   5,  90, 49, -12,   1,  0 },
+               { 2,  -5,   1,  86, 55, -12,   0,  1 },
+               { 2,  -4,  -2,  82, 61, -11,  -1,  1 },
+               { 1,  -3,  -5,  77, 67,  -9,  -1,  1 },
+               { 1,  -2,  -7,  72, 72,  -7,  -2,  1 },
+               { 1,  -1,  -9,  67, 77,  -5,  -3,  1 },
+               { 1,  -1, -11,  61, 82,  -2,  -4,  2 },
+               { 1,   0, -12,  55, 86,   1,  -5,  2 },
+               { 0,   1, -12,  49, 90,   5,  -7,  2 },
+               { 0,   1, -12,  43, 92,  10,  -8,  2 },
+               { 0,   2, -12,  37, 94,  14,  -9,  2 },
+               { 0,   2, -12,  31, 96,  19, -10,  2 }
+       }, {    /* 87381 < Ratio <= 104857 (~8:5) */
+               { -1,  -8, 33,  80, 33,  -8,  -1,  0 },
+               { -1,  -8, 28,  80, 37,  -7,  -2,  1 },
+               {  0,  -8, 24,  79, 41,  -7,  -2,  1 },
+               {  0,  -8, 20,  78, 46,  -6,  -3,  1 },
+               {  0,  -8, 16,  76, 50,  -4,  -3,  1 },
+               {  0,  -7, 13,  74, 54,  -3,  -4,  1 },
+               {  1,  -7, 10,  71, 58,  -1,  -5,  1 },
+               {  1,  -6,  6,  68, 62,   1,  -5,  1 },
+               {  1,  -6,  4,  65, 65,   4,  -6,  1 },
+               {  1,  -5,  1,  62, 68,   6,  -6,  1 },
+               {  1,  -5, -1,  58, 71,  10,  -7,  1 },
+               {  1,  -4, -3,  54, 74,  13,  -7,  0 },
+               {  1,  -3, -4,  50, 76,  16,  -8,  0 },
+               {  1,  -3, -6,  46, 78,  20,  -8,  0 },
+               {  1,  -2, -7,  41, 79,  24,  -8,  0 },
+               {  1,  -2, -7,  37, 80,  28,  -8, -1 }
+       }, {    /* 104857 < Ratio <= 131072 (~8:4) */
+               { -3,   0, 35,  64, 35,   0,  -3,  0 },
+               { -3,  -1, 32,  64, 38,   1,  -3,  0 },
+               { -2,  -2, 29,  63, 41,   2,  -3,  0 },
+               { -2,  -3, 27,  63, 43,   4,  -4,  0 },
+               { -2,  -3, 24,  61, 46,   6,  -4,  0 },
+               { -2,  -3, 21,  60, 49,   7,  -4,  0 },
+               { -1,  -4, 19,  59, 51,   9,  -4, -1 },
+               { -1,  -4, 16,  57, 53,  12,  -4, -1 },
+               { -1,  -4, 14,  55, 55,  14,  -4, -1 },
+               { -1,  -4, 12,  53, 57,  16,  -4, -1 },
+               { -1,  -4,  9,  51, 59,  19,  -4, -1 },
+               {  0,  -4,  7,  49, 60,  21,  -3, -2 },
+               {  0,  -4,  6,  46, 61,  24,  -3, -2 },
+               {  0,  -4,  4,  43, 63,  27,  -3, -2 },
+               {  0,  -3,  2,  41, 63,  29,  -2, -2 },
+               {  0,  -3,  1,  38, 64,  32,  -1, -3 }
+       }, {    /* 131072 < Ratio <= 174762 (~8:3) */
+               { -1,   8, 33,  48, 33,   8,  -1,  0 },
+               { -1,   7, 31,  49, 35,   9,  -1, -1 },
+               { -1,   6, 30,  49, 36,  10,  -1, -1 },
+               { -1,   5, 28,  48, 38,  12,  -1, -1 },
+               { -1,   4, 26,  48, 39,  13,   0, -1 },
+               { -1,   3, 24,  47, 41,  15,   0, -1 },
+               { -1,   2, 23,  47, 42,  16,   0, -1 },
+               { -1,   2, 21,  45, 43,  18,   1, -1 },
+               { -1,   1, 19,  45, 45,  19,   1, -1 },
+               { -1,   1, 18,  43, 45,  21,   2, -1 },
+               { -1,   0, 16,  42, 47,  23,   2, -1 },
+               { -1,   0, 15,  41, 47,  24,   3, -1 },
+               { -1,   0, 13,  39, 48,  26,   4, -1 },
+               { -1,  -1, 12,  38, 48,  28,   5, -1 },
+               { -1,  -1, 10,  36, 49,  30,   6, -1 },
+               { -1,  -1,  9,  35, 49,  31,   7, -1 }
+       }, {    /* 174762 < Ratio <= 262144 (~8:2) */
+               {  2,  13, 30,  38, 30,  13,   2,  0 },
+               {  2,  12, 29,  38, 30,  14,   3,  0 },
+               {  2,  11, 28,  38, 31,  15,   3,  0 },
+               {  2,  10, 26,  38, 32,  16,   4,  0 },
+               {  1,  10, 26,  37, 33,  17,   4,  0 },
+               {  1,   9, 24,  37, 34,  18,   5,  0 },
+               {  1,   8, 24,  37, 34,  19,   5,  0 },
+               {  1,   7, 22,  36, 35,  20,   6,  1 },
+               {  1,   6, 21,  36, 36,  21,   6,  1 },
+               {  1,   6, 20,  35, 36,  22,   7,  1 },
+               {  0,   5, 19,  34, 37,  24,   8,  1 },
+               {  0,   5, 18,  34, 37,  24,   9,  1 },
+               {  0,   4, 17,  33, 37,  26,  10,  1 },
+               {  0,   4, 16,  32, 38,  26,  10,  2 },
+               {  0,   3, 15,  31, 38,  28,  11,  2 },
+               {  0,   3, 14,  30, 38,  29,  12,  2 }
+       }
+};
+
+/* 4-tap Filter Coefficient */
+static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
+       {       /* Ratio <= 65536 (~8:8) */
+               {  0, 128,   0,  0 },
+               { -4, 127,   5,  0 },
+               { -6, 124,  11, -1 },
+               { -8, 118,  19, -1 },
+               { -8, 111,  27, -2 },
+               { -8, 102,  37, -3 },
+               { -8,  92,  48, -4 },
+               { -7,  81,  59, -5 },
+               { -6,  70,  70, -6 },
+               { -5,  59,  81, -7 },
+               { -4,  48,  92, -8 },
+               { -3,  37, 102, -8 },
+               { -2,  27, 111, -8 },
+               { -1,  19, 118, -8 },
+               { -1,  11, 124, -6 },
+               {  0,   5, 127, -4 }
+       }, {    /* 65536 < Ratio <= 74898 (~8:7) */
+               {  8, 112,   8,  0 },
+               {  4, 111,  14, -1 },
+               {  1, 109,  20, -2 },
+               { -2, 105,  27, -2 },
+               { -3, 100,  34, -3 },
+               { -5,  93,  43, -3 },
+               { -5,  86,  51, -4 },
+               { -5,  77,  60, -4 },
+               { -5,  69,  69, -5 },
+               { -4,  60,  77, -5 },
+               { -4,  51,  86, -5 },
+               { -3,  43,  93, -5 },
+               { -3,  34, 100, -3 },
+               { -2,  27, 105, -2 },
+               { -2,  20, 109,  1 },
+               { -1,  14, 111,  4 }
+       }, {    /* 74898 < Ratio <= 87381 (~8:6) */
+               { 16,  96,  16,  0 },
+               { 12,  97,  21, -2 },
+               {  8,  96,  26, -2 },
+               {  5,  93,  32, -2 },
+               {  2,  89,  39, -2 },
+               {  0,  84,  46, -2 },
+               { -1,  79,  53, -3 },
+               { -2,  73,  59, -2 },
+               { -2,  66,  66, -2 },
+               { -2,  59,  73, -2 },
+               { -3,  53,  79, -1 },
+               { -2,  46,  84,  0 },
+               { -2,  39,  89,  2 },
+               { -2,  32,  93,  5 },
+               { -2,  26,  96,  8 },
+               { -2,  21,  97, 12 }
+       }, {    /* 87381 < Ratio <= 104857 (~8:5) */
+               { 22,  84,  22,  0 },
+               { 18,  85,  26, -1 },
+               { 14,  84,  31, -1 },
+               { 11,  82,  36, -1 },
+               {  8,  79,  42, -1 },
+               {  6,  76,  47, -1 },
+               {  4,  72,  52,  0 },
+               {  2,  68,  58,  0 },
+               {  1,  63,  63,  1 },
+               {  0,  58,  68,  2 },
+               {  0,  52,  72,  4 },
+               { -1,  47,  76,  6 },
+               { -1,  42,  79,  8 },
+               { -1,  36,  82, 11 },
+               { -1,  31,  84, 14 },
+               { -1,  26,  85, 18 }
+       }, {    /* 104857 < Ratio <= 131072 (~8:4) */
+               { 26,  76,  26,  0 },
+               { 22,  76,  30,  0 },
+               { 19,  75,  34,  0 },
+               { 16,  73,  38,  1 },
+               { 13,  71,  43,  1 },
+               { 10,  69,  47,  2 },
+               {  8,  66,  51,  3 },
+               {  6,  63,  55,  4 },
+               {  5,  59,  59,  5 },
+               {  4,  55,  63,  6 },
+               {  3,  51,  66,  8 },
+               {  2,  47,  69, 10 },
+               {  1,  43,  71, 13 },
+               {  1,  38,  73, 16 },
+               {  0,  34,  75, 19 },
+               {  0,  30,  76, 22 }
+       }, {    /* 131072 < Ratio <= 174762 (~8:3) */
+               { 29,  70,  29,  0 },
+               { 26,  68,  32,  2 },
+               { 23,  67,  36,  2 },
+               { 20,  66,  39,  3 },
+               { 17,  65,  43,  3 },
+               { 15,  63,  46,  4 },
+               { 12,  61,  50,  5 },
+               { 10,  58,  53,  7 },
+               {  8,  56,  56,  8 },
+               {  7,  53,  58, 10 },
+               {  5,  50,  61, 12 },
+               {  4,  46,  63, 15 },
+               {  3,  43,  65, 17 },
+               {  3,  39,  66, 20 },
+               {  2,  36,  67, 23 },
+               {  2,  32,  68, 26 }
+       }, {    /* 174762 < Ratio <= 262144 (~8:2) */
+               { 32,  64,  32,  0 },
+               { 28,  63,  34,  3 },
+               { 25,  62,  37,  4 },
+               { 22,  62,  40,  4 },
+               { 19,  61,  43,  5 },
+               { 17,  59,  46,  6 },
+               { 15,  58,  48,  7 },
+               { 13,  55,  51,  9 },
+               { 11,  53,  53, 11 },
+               {  9,  51,  55, 13 },
+               {  7,  48,  58, 15 },
+               {  6,  46,  59, 17 },
+               {  5,  43,  61, 19 },
+               {  4,  40,  62, 22 },
+               {  4,  37,  62, 25 },
+               {  3,  34,  63, 28 }
+       }
+};
+
+static int gsc_sw_reset(struct gsc_context *ctx)
+{
+       u32 cfg;
+       int count = GSC_RESET_TIMEOUT;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* s/w reset */
+       cfg = (GSC_SW_RESET_SRESET);
+       gsc_write(cfg, GSC_SW_RESET);
+
+       /* wait s/w reset complete */
+       while (count--) {
+               cfg = gsc_read(GSC_SW_RESET);
+               if (!cfg)
+                       break;
+               usleep_range(1000, 2000);
+       }
+
+       if (cfg) {
+               DRM_ERROR("failed to reset gsc h/w.\n");
+               return -EBUSY;
+       }
+
+       /* reset sequence */
+       cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+       cfg |= (GSC_IN_BASE_ADDR_MASK |
+               GSC_IN_BASE_ADDR_PINGPONG(0));
+       gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+       gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+       gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+       cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+       cfg |= (GSC_OUT_BASE_ADDR_MASK |
+               GSC_OUT_BASE_ADDR_PINGPONG(0));
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+       return 0;
+}
+
+static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
+{
+       u32 gscblk_cfg;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+
+       if (enable)
+               gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
+                               GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
+                               GSC_BLK_SW_RESET_WB_DEST(ctx->id);
+       else
+               gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
+
+       writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+}
+
+static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
+               bool overflow, bool done)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+                       enable, overflow, done);
+
+       cfg = gsc_read(GSC_IRQ);
+       cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
+
+       if (enable)
+               cfg |= GSC_IRQ_ENABLE;
+       else
+               cfg &= ~GSC_IRQ_ENABLE;
+
+       if (overflow)
+               cfg &= ~GSC_IRQ_OR_MASK;
+       else
+               cfg |= GSC_IRQ_OR_MASK;
+
+       if (done)
+               cfg &= ~GSC_IRQ_FRMDONE_MASK;
+       else
+               cfg |= GSC_IRQ_FRMDONE_MASK;
+
+       gsc_write(cfg, GSC_IRQ);
+}
+
+
+static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       cfg = gsc_read(GSC_IN_CON);
+       cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+                GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+                GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
+                GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+               cfg |= GSC_IN_RGB565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               cfg |= GSC_IN_XRGB8888;
+               break;
+       case DRM_FORMAT_BGRX8888:
+               cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
+               break;
+       case DRM_FORMAT_YUYV:
+               cfg |= (GSC_IN_YUV422_1P |
+                       GSC_IN_YUV422_1P_ORDER_LSB_Y |
+                       GSC_IN_CHROMA_ORDER_CBCR);
+               break;
+       case DRM_FORMAT_YVYU:
+               cfg |= (GSC_IN_YUV422_1P |
+                       GSC_IN_YUV422_1P_ORDER_LSB_Y |
+                       GSC_IN_CHROMA_ORDER_CRCB);
+               break;
+       case DRM_FORMAT_UYVY:
+               cfg |= (GSC_IN_YUV422_1P |
+                       GSC_IN_YUV422_1P_OEDER_LSB_C |
+                       GSC_IN_CHROMA_ORDER_CBCR);
+               break;
+       case DRM_FORMAT_VYUY:
+               cfg |= (GSC_IN_YUV422_1P |
+                       GSC_IN_YUV422_1P_OEDER_LSB_C |
+                       GSC_IN_CHROMA_ORDER_CRCB);
+               break;
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+               cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+                       GSC_IN_YUV420_2P);
+               break;
+       case DRM_FORMAT_YUV422:
+               cfg |= GSC_IN_YUV422_3P;
+               break;
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               cfg |= GSC_IN_YUV420_3P;
+               break;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV16:
+               cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+                       GSC_IN_YUV420_2P);
+               break;
+       case DRM_FORMAT_NV12MT:
+               cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       gsc_write(cfg, GSC_IN_CON);
+
+       return 0;
+}
+
+static int gsc_src_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+               degree, flip);
+
+       cfg = gsc_read(GSC_IN_CON);
+       cfg &= ~GSC_IN_ROT_MASK;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_0:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= GSC_IN_ROT_XFLIP;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= GSC_IN_ROT_YFLIP;
+               break;
+       case EXYNOS_DRM_DEGREE_90:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= GSC_IN_ROT_90_XFLIP;
+               else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= GSC_IN_ROT_90_YFLIP;
+               else
+                       cfg |= GSC_IN_ROT_90;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               cfg |= GSC_IN_ROT_180;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               cfg |= GSC_IN_ROT_270;
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+               return -EINVAL;
+       }
+
+       gsc_write(cfg, GSC_IN_CON);
+
+       ctx->rotation = cfg &
+               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       *swap = ctx->rotation;
+
+       return 0;
+}
+
+static int gsc_src_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct drm_exynos_pos img_pos = *pos;
+       struct gsc_scaler *sc = &ctx->sc;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+               __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+       if (swap) {
+               img_pos.w = pos->h;
+               img_pos.h = pos->w;
+       }
+
+       /* pixel offset */
+       cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
+               GSC_SRCIMG_OFFSET_Y(img_pos.y));
+       gsc_write(cfg, GSC_SRCIMG_OFFSET);
+
+       /* cropped size */
+       cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
+               GSC_CROPPED_HEIGHT(img_pos.h));
+       gsc_write(cfg, GSC_CROPPED_SIZE);
+
+       DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+               __func__, sz->hsize, sz->vsize);
+
+       /* original size */
+       cfg = gsc_read(GSC_SRCIMG_SIZE);
+       cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
+               GSC_SRCIMG_WIDTH_MASK);
+
+       cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
+               GSC_SRCIMG_HEIGHT(sz->vsize));
+
+       gsc_write(cfg, GSC_SRCIMG_SIZE);
+
+       cfg = gsc_read(GSC_IN_CON);
+       cfg &= ~GSC_IN_RGB_TYPE_MASK;
+
+       DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+               __func__, pos->w, sc->range);
+
+       if (pos->w >= GSC_WIDTH_ITU_709)
+               if (sc->range)
+                       cfg |= GSC_IN_RGB_HD_WIDE;
+               else
+                       cfg |= GSC_IN_RGB_HD_NARROW;
+       else
+               if (sc->range)
+                       cfg |= GSC_IN_RGB_SD_WIDE;
+               else
+                       cfg |= GSC_IN_RGB_SD_NARROW;
+
+       gsc_write(cfg, GSC_IN_CON);
+
+       return 0;
+}
+
+static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       bool masked;
+       u32 cfg;
+       u32 mask = 0x00000001 << buf_id;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+               buf_id, buf_type);
+
+       /* mask register set */
+       cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               masked = false;
+               break;
+       case IPP_BUF_DEQUEUE:
+               masked = true;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+               return -EINVAL;
+       }
+
+       /* sequence id */
+       cfg &= ~mask;
+       cfg |= masked << buf_id;
+       gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+       gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+       gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+       return 0;
+}
+
+static int gsc_src_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EFAULT;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EFAULT;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+               property->prop_id, buf_id, buf_type);
+
+       if (buf_id > GSC_MAX_SRC) {
+               dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+               return -EINVAL;
+       }
+
+       /* address register set */
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+                       GSC_IN_BASE_ADDR_Y(buf_id));
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                       GSC_IN_BASE_ADDR_CB(buf_id));
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                       GSC_IN_BASE_ADDR_CR(buf_id));
+               break;
+       case IPP_BUF_DEQUEUE:
+               gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
+               gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
+               gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_src_ops = {
+       .set_fmt = gsc_src_set_fmt,
+       .set_transf = gsc_src_set_transf,
+       .set_size = gsc_src_set_size,
+       .set_addr = gsc_src_set_addr,
+};
+
+static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       cfg = gsc_read(GSC_OUT_CON);
+       cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+                GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+                GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
+                GSC_OUT_GLOBAL_ALPHA_MASK);
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+               cfg |= GSC_OUT_RGB565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               cfg |= GSC_OUT_XRGB8888;
+               break;
+       case DRM_FORMAT_BGRX8888:
+               cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
+               break;
+       case DRM_FORMAT_YUYV:
+               cfg |= (GSC_OUT_YUV422_1P |
+                       GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+                       GSC_OUT_CHROMA_ORDER_CBCR);
+               break;
+       case DRM_FORMAT_YVYU:
+               cfg |= (GSC_OUT_YUV422_1P |
+                       GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+                       GSC_OUT_CHROMA_ORDER_CRCB);
+               break;
+       case DRM_FORMAT_UYVY:
+               cfg |= (GSC_OUT_YUV422_1P |
+                       GSC_OUT_YUV422_1P_OEDER_LSB_C |
+                       GSC_OUT_CHROMA_ORDER_CBCR);
+               break;
+       case DRM_FORMAT_VYUY:
+               cfg |= (GSC_OUT_YUV422_1P |
+                       GSC_OUT_YUV422_1P_OEDER_LSB_C |
+                       GSC_OUT_CHROMA_ORDER_CRCB);
+               break;
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
+               break;
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               cfg |= GSC_OUT_YUV420_3P;
+               break;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV16:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+                       GSC_OUT_YUV420_2P);
+               break;
+       case DRM_FORMAT_NV12MT:
+               cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       gsc_write(cfg, GSC_OUT_CON);
+
+       return 0;
+}
+
+/*
+ * gsc_dst_set_transf - program rotation/flip for the output path.
+ * Records in ctx->rotation (and *swap) whether a 90/270 rotation is in
+ * effect, since that swaps width/height for later size programming.
+ * Returns 0 on success, -EINVAL on an unsupported degree.
+ */
+static int gsc_dst_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+               degree, flip);
+
+       cfg = gsc_read(GSC_IN_CON);
+       cfg &= ~GSC_IN_ROT_MASK;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_0:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= GSC_IN_ROT_XFLIP;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= GSC_IN_ROT_YFLIP;
+               break;
+       case EXYNOS_DRM_DEGREE_90:
+               /* NOTE(review): 90-degree flips are mutually exclusive here,
+                * unlike the 0-degree case above — confirm that is intended. */
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= GSC_IN_ROT_90_XFLIP;
+               else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= GSC_IN_ROT_90_YFLIP;
+               else
+                       cfg |= GSC_IN_ROT_90;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               cfg |= GSC_IN_ROT_180;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               cfg |= GSC_IN_ROT_270;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+               return -EINVAL;
+       }
+
+       gsc_write(cfg, GSC_IN_CON);
+
+       /* 90/270 rotations swap the scaled width/height downstream. */
+       ctx->rotation = (cfg &
+               (GSC_IN_ROT_90 | GSC_IN_ROT_270)) ? 1 : 0;
+       *swap = ctx->rotation;
+
+       return 0;
+}
+
+/*
+ * gsc_get_ratio_shift - pick the pre-scaler down-ratio (1, 2 or 4) for
+ * one axis.  Fails when the requested shrink factor is 8x or more, which
+ * the pre-scaler cannot provide.
+ */
+static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+{
+       DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+       if (src >= dst * 8) {
+               DRM_ERROR("failed to make ratio and shift.\n");
+               return -EINVAL;
+       }
+
+       if (src >= dst * 4)
+               *ratio = 4;
+       else if (src >= dst * 2)
+               *ratio = 2;
+       else
+               *ratio = 1;
+
+       return 0;
+}
+
+/*
+ * gsc_get_prescaler_shfactor - derive the pre-scaler shift factor from
+ * the per-axis down-ratios (each 1, 2 or 4, from gsc_get_ratio_shift).
+ * The result equals log2(hratio * vratio) for those ratio combinations.
+ */
+static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
+{
+       u32 factor;
+
+       if (hratio == 4 && vratio == 4)
+               factor = 4;
+       else if ((hratio == 4 && vratio == 2) ||
+                (hratio == 2 && vratio == 4))
+               factor = 3;
+       else if ((hratio == 4 && vratio == 1) ||
+                (hratio == 1 && vratio == 4) ||
+                (hratio == 2 && vratio == 2))
+               factor = 2;
+       else if (hratio == 1 && vratio == 1)
+               factor = 0;
+       else
+               factor = 1;
+
+       *shfactor = factor;
+}
+
+/*
+ * gsc_set_prescaler - compute and program the pre-scale ratios and shift
+ * factor, and compute the 16.16 fixed-point main scaler ratios.
+ * When a 90/270 rotation is active (ctx->rotation), destination width and
+ * height are swapped before the ratios are derived.
+ * Returns 0 on success or the error from gsc_get_ratio_shift().
+ */
+static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
+               struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+       u32 src_w, src_h, dst_w, dst_h;
+       int ret = 0;
+
+       src_w = src->w;
+       src_h = src->h;
+
+       if (ctx->rotation) {
+               dst_w = dst->h;
+               dst_h = dst->w;
+       } else {
+               dst_w = dst->w;
+               dst_h = dst->h;
+       }
+
+       ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+       if (ret) {
+               dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+               return ret;
+       }
+
+       ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+       if (ret) {
+               dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+               return ret;
+       }
+
+       DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
+               __func__, sc->pre_hratio, sc->pre_vratio);
+
+       /* 16.16 fixed-point main scaler ratios (src/dst). */
+       sc->main_hratio = (src_w << 16) / dst_w;
+       sc->main_vratio = (src_h << 16) / dst_h;
+
+       /* NOTE(review): "%ld" assumes main_hratio/main_vratio are long —
+        * confirm against the gsc_scaler definition. */
+       DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+               __func__, sc->main_hratio, sc->main_vratio);
+
+       gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+               &sc->pre_shfactor);
+
+       DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
+               sc->pre_shfactor);
+
+       cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
+               GSC_PRESC_H_RATIO(sc->pre_hratio) |
+               GSC_PRESC_V_RATIO(sc->pre_vratio));
+       gsc_write(cfg, GSC_PRE_SCALE_RATIO);
+
+       return ret;
+}
+
+/*
+ * gsc_coef_ratio_index - map a 16.16 fixed-point main scaler ratio to a
+ * row of the polyphase coefficient tables.  Row 0 covers up-scaling,
+ * rows 1-6 cover progressively stronger down-scaling.
+ * Shared by the H and V paths, which previously duplicated this chain.
+ */
+static int gsc_coef_ratio_index(unsigned long main_ratio)
+{
+       if (main_ratio <= GSC_SC_UP_MAX_RATIO)
+               return 0;
+       else if (main_ratio <= GSC_SC_DOWN_RATIO_7_8)
+               return 1;
+       else if (main_ratio <= GSC_SC_DOWN_RATIO_6_8)
+               return 2;
+       else if (main_ratio <= GSC_SC_DOWN_RATIO_5_8)
+               return 3;
+       else if (main_ratio <= GSC_SC_DOWN_RATIO_4_8)
+               return 4;
+       else if (main_ratio <= GSC_SC_DOWN_RATIO_3_8)
+               return 5;
+       else
+               return 6;
+}
+
+/* Load the 8-tap horizontal filter coefficients for the given ratio. */
+static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
+{
+       int i, j, k;
+       int sc_ratio = gsc_coef_ratio_index(main_hratio);
+
+       for (i = 0; i < GSC_COEF_PHASE; i++)
+               for (j = 0; j < GSC_COEF_H_8T; j++)
+                       for (k = 0; k < GSC_COEF_DEPTH; k++)
+                               gsc_write(h_coef_8t[sc_ratio][i][j],
+                                       GSC_HCOEF(i, j, k));
+}
+
+/* Load the 4-tap vertical filter coefficients for the given ratio. */
+static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
+{
+       int i, j, k;
+       int sc_ratio = gsc_coef_ratio_index(main_vratio);
+
+       for (i = 0; i < GSC_COEF_PHASE; i++)
+               for (j = 0; j < GSC_COEF_V_4T; j++)
+                       for (k = 0; k < GSC_COEF_DEPTH; k++)
+                               gsc_write(v_coef_4t[sc_ratio][i][j],
+                                       GSC_VCOEF(i, j, k));
+}
+
+/*
+ * gsc_set_scaler - load the filter coefficient tables and program the
+ * main H/V scaler ratio registers from the precomputed gsc_scaler state.
+ */
+static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+               __func__, sc->main_hratio, sc->main_vratio);
+
+       gsc_set_h_coef(ctx, sc->main_hratio);
+       cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+       gsc_write(cfg, GSC_MAIN_H_RATIO);
+
+       gsc_set_v_coef(ctx, sc->main_vratio);
+       cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+       gsc_write(cfg, GSC_MAIN_V_RATIO);
+}
+
+/*
+ * gsc_dst_set_size - program destination offset, scaled size, original
+ * image size and the RGB range/width type.  With swap set (90/270
+ * rotation) the scaled width/height are exchanged.
+ * Always returns 0.
+ */
+static int gsc_dst_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct drm_exynos_pos img_pos = *pos;
+       struct gsc_scaler *sc = &ctx->sc;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+               __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+       if (swap) {
+               img_pos.w = pos->h;
+               img_pos.h = pos->w;
+       }
+
+       /* pixel offset */
+       cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
+               GSC_DSTIMG_OFFSET_Y(pos->y));
+       gsc_write(cfg, GSC_DSTIMG_OFFSET);
+
+       /* scaled size */
+       cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
+       gsc_write(cfg, GSC_SCALED_SIZE);
+
+       DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+               __func__, sz->hsize, sz->vsize);
+
+       /* original size */
+       cfg = gsc_read(GSC_DSTIMG_SIZE);
+       cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
+               GSC_DSTIMG_WIDTH_MASK);
+       cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
+               GSC_DSTIMG_HEIGHT(sz->vsize));
+       gsc_write(cfg, GSC_DSTIMG_SIZE);
+
+       cfg = gsc_read(GSC_OUT_CON);
+       cfg &= ~GSC_OUT_RGB_TYPE_MASK;
+
+       DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+               __func__, pos->w, sc->range);
+
+       /* HD (ITU-709) vs SD colorimetry, wide vs narrow range.
+        * Braced explicitly: the original unbraced nesting relied on
+        * dangling-else association. */
+       if (pos->w >= GSC_WIDTH_ITU_709) {
+               if (sc->range)
+                       cfg |= GSC_OUT_RGB_HD_WIDE;
+               else
+                       cfg |= GSC_OUT_RGB_HD_NARROW;
+       } else {
+               if (sc->range)
+                       cfg |= GSC_OUT_RGB_SD_WIDE;
+               else
+                       cfg |= GSC_OUT_RGB_SD_NARROW;
+       }
+
+       gsc_write(cfg, GSC_OUT_CON);
+
+       return 0;
+}
+
+/*
+ * gsc_dst_get_buf_seq - count destination buffer slots currently in use.
+ * Each set bit in the Y base-address mask register hides one slot, so
+ * the in-use count is GSC_REG_SZ minus the number of set bits.
+ */
+static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
+{
+       u32 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+       u32 buf_num = GSC_REG_SZ;
+       u32 i;
+
+       for (i = 0; i < GSC_REG_SZ; i++) {
+               if (cfg & (1 << i))
+                       buf_num--;
+       }
+
+       DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+       return buf_num;
+}
+
+/*
+ * gsc_dst_set_buf_seq - (un)mask one destination buffer slot and manage
+ * the frame-done interrupt accordingly.  Enqueue clears the slot's mask
+ * bit (slot usable); dequeue sets it.  Serialized by ctx->lock.
+ * Returns 0 on success, -EINVAL for an unknown buf_type.
+ */
+static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       bool masked;
+       u32 cfg;
+       u32 mask = 0x00000001 << buf_id;
+       int ret = 0;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+               buf_id, buf_type);
+
+       mutex_lock(&ctx->lock);
+
+       /* mask register set */
+       cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               masked = false;
+               break;
+       case IPP_BUF_DEQUEUE:
+               masked = true;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+               ret =  -EINVAL;
+               goto err_unlock;
+       }
+
+       /* sequence id: the same mask value is mirrored to Y/CB/CR. */
+       cfg &= ~mask;
+       cfg |= masked << buf_id;
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+       /* interrupt enable
+        * NOTE(review): thresholds GSC_BUF_START/GSC_BUF_STOP are defined
+        * elsewhere — confirm they bracket the queue depth as expected. */
+       if (buf_type == IPP_BUF_ENQUEUE &&
+           gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+               gsc_handle_irq(ctx, true, false, true);
+
+       /* interrupt disable */
+       if (buf_type == IPP_BUF_DEQUEUE &&
+           gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+               gsc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+       mutex_unlock(&ctx->lock);
+       return ret;
+}
+
+/*
+ * gsc_dst_set_addr - program (enqueue) or clear (dequeue) the Y/CB/CR
+ * base addresses of one destination buffer slot, then update the slot
+ * mask sequence via gsc_dst_set_buf_seq().
+ */
+static int gsc_dst_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EFAULT;
+       }
+
+       property = &c_node->property;
+       /* NOTE(review): the address of an embedded member can never be
+        * NULL, so this check is dead code — candidate for removal. */
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EFAULT;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+               property->prop_id, buf_id, buf_type);
+
+       /* NOTE(review): if GSC_MAX_DST is a slot count, valid ids are
+        * 0..GSC_MAX_DST-1 and this should be ">=" — confirm. */
+       if (buf_id > GSC_MAX_DST) {
+               dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+               return -EINVAL;
+       }
+
+       /* address register set */
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+                       GSC_OUT_BASE_ADDR_Y(buf_id));
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                       GSC_OUT_BASE_ADDR_CB(buf_id));
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                       GSC_OUT_BASE_ADDR_CR(buf_id));
+               break;
+       case IPP_BUF_DEQUEUE:
+               gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
+               gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
+               gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+/* IPP operation table for the GScaler destination (output) path. */
+static struct exynos_drm_ipp_ops gsc_dst_ops = {
+       .set_fmt = gsc_dst_set_fmt,
+       .set_transf = gsc_dst_set_transf,
+       .set_size = gsc_dst_set_size,
+       .set_addr = gsc_dst_set_addr,
+};
+
+/*
+ * gsc_clk_ctrl - gate the GScaler clock on or off and track the
+ * suspended state accordingly.  Always returns 0.
+ */
+static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
+{
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       if (enable)
+               clk_enable(ctx->gsc_clk);
+       else
+               clk_disable(ctx->gsc_clk);
+
+       ctx->suspended = !enable;
+
+       return 0;
+}
+
+/*
+ * gsc_get_src_buf_index - find the source buffer the hardware just
+ * finished with (first unmasked slot at or after the current index) and
+ * dequeue it.  Returns the buffer id, or a negative error code.
+ */
+static int gsc_get_src_buf_index(struct gsc_context *ctx)
+{
+       u32 cfg, curr_index, i;
+       u32 buf_id = GSC_MAX_SRC;
+       int ret;
+
+       DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+       cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+       curr_index = GSC_IN_CURR_GET_INDEX(cfg);
+
+       /* A clear mask bit marks an active (unmasked) slot. */
+       for (i = curr_index; i < GSC_MAX_SRC; i++) {
+               if (!((cfg >> i) & 0x1)) {
+                       buf_id = i;
+                       break;
+               }
+       }
+
+       if (buf_id == GSC_MAX_SRC) {
+               DRM_ERROR("failed to get in buffer index.\n");
+               return -EINVAL;
+       }
+
+       ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+       if (ret < 0) {
+               DRM_ERROR("failed to dequeue.\n");
+               return ret;
+       }
+
+       DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+               curr_index, buf_id);
+
+       return buf_id;
+}
+
+/*
+ * gsc_get_dst_buf_index - find the destination buffer the hardware just
+ * completed (first unmasked slot at or after the current index) and
+ * dequeue it.  Returns the buffer id, or a negative error code.
+ */
+static int gsc_get_dst_buf_index(struct gsc_context *ctx)
+{
+       u32 cfg, curr_index, i;
+       u32 buf_id = GSC_MAX_DST;
+       int ret;
+
+       DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+       cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+       curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
+
+       /* Scan for the first slot whose mask bit is clear (active). */
+       for (i = curr_index; i < GSC_MAX_DST; i++) {
+               if (!(cfg & (1 << i))) {
+                       buf_id = i;
+                       break;
+               }
+       }
+
+       if (buf_id == GSC_MAX_DST) {
+               DRM_ERROR("failed to get out buffer index.\n");
+               return -EINVAL;
+       }
+
+       ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+       if (ret < 0) {
+               DRM_ERROR("failed to dequeue.\n");
+               return ret;
+       }
+
+       DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+               curr_index, buf_id);
+
+       return buf_id;
+}
+
+/*
+ * gsc_irq_handler - threaded IRQ handler.  On overflow the IRQ is
+ * reported and ignored; on frame-done the finished src/dst buffer ids
+ * are resolved and handed to the IPP event work queue.
+ */
+static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
+{
+       struct gsc_context *ctx = dev_id;
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       /* NOTE(review): c_node is dereferenced without a NULL check; every
+        * other user of ippdrv->cmd in this file checks it first — confirm
+        * an interrupt cannot fire without an active command node. */
+       struct drm_exynos_ipp_event_work *event_work =
+               c_node->event_work;
+       u32 status;
+       int buf_id[EXYNOS_DRM_OPS_MAX];
+
+       DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+       status = gsc_read(GSC_IRQ);
+       if (status & GSC_IRQ_STATUS_OR_IRQ) {
+               dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
+                       ctx->id, status);
+               return IRQ_NONE;
+       }
+
+       if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
+               dev_dbg(ippdrv->dev, "occurred frame done at %d, status 0x%x.\n",
+                       ctx->id, status);
+
+               buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
+               if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
+                       return IRQ_HANDLED;
+
+               buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
+               if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
+                       return IRQ_HANDLED;
+
+               DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
+                       buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
+
+               event_work->ippdrv = ippdrv;
+               event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
+                       buf_id[EXYNOS_DRM_OPS_SRC];
+               event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+                       buf_id[EXYNOS_DRM_OPS_DST];
+               queue_work(ippdrv->event_workq,
+                       (struct work_struct *)event_work);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * gsc_init_prop_list - allocate and fill the capability list advertised
+ * to the IPP core (supported flips, rotations, crop/scale limits).
+ * Memory is device-managed (devm).  Returns 0 or -ENOMEM.
+ */
+static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+       struct drm_exynos_ipp_prop_list *prop_list;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+       if (!prop_list) {
+               DRM_ERROR("failed to alloc property list.\n");
+               return -ENOMEM;
+       }
+
+       prop_list->version = 1;
+       prop_list->writeback = 1;
+       prop_list->refresh_min = GSC_REFRESH_MIN;
+       prop_list->refresh_max = GSC_REFRESH_MAX;
+       /* Flip/degree capabilities are bitmasks keyed by the enum values. */
+       prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+                               (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+       prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+                               (1 << EXYNOS_DRM_DEGREE_90) |
+                               (1 << EXYNOS_DRM_DEGREE_180) |
+                               (1 << EXYNOS_DRM_DEGREE_270);
+       prop_list->csc = 1;
+       prop_list->crop = 1;
+       prop_list->crop_max.hsize = GSC_CROP_MAX;
+       prop_list->crop_max.vsize = GSC_CROP_MAX;
+       prop_list->crop_min.hsize = GSC_CROP_MIN;
+       prop_list->crop_min.vsize = GSC_CROP_MIN;
+       prop_list->scale = 1;
+       prop_list->scale_max.hsize = GSC_SCALE_MAX;
+       prop_list->scale_max.vsize = GSC_SCALE_MAX;
+       prop_list->scale_min.hsize = GSC_SCALE_MIN;
+       prop_list->scale_min.vsize = GSC_SCALE_MIN;
+
+       ippdrv->prop_list = prop_list;
+
+       return 0;
+}
+
+/*
+ * gsc_check_drm_flip - accept only the four supported flip settings:
+ * none, vertical, horizontal, or both combined.
+ */
+static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
+{
+       switch (flip) {
+       case EXYNOS_DRM_FLIP_NONE:
+       case EXYNOS_DRM_FLIP_VERTICAL:
+       case EXYNOS_DRM_FLIP_HORIZONTAL:
+       case EXYNOS_DRM_FLIP_VERTICAL | EXYNOS_DRM_FLIP_HORIZONTAL:
+               return true;
+       default:
+               break;
+       }
+
+       DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+       return false;
+}
+
+/*
+ * gsc_ippdrv_check_property - validate a requested IPP property against
+ * the advertised capabilities: flip, degree, buffer bounds, and the
+ * crop (source) / scale (destination) limits, with hsize/vsize limits
+ * swapped when a 90/270 rotation is requested.
+ * Returns 0 if acceptable, -EINVAL otherwise (after dumping both
+ * configurations for diagnosis).
+ */
+static int gsc_ippdrv_check_property(struct device *dev,
+               struct drm_exynos_ipp_property *property)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+       struct drm_exynos_ipp_config *config;
+       struct drm_exynos_pos *pos;
+       struct drm_exynos_sz *sz;
+       bool swap;
+       int i;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       for_each_ipp_ops(i) {
+               /* Writeback has no memory source config to validate. */
+               if ((i == EXYNOS_DRM_OPS_SRC) &&
+                       (property->cmd == IPP_CMD_WB))
+                       continue;
+
+               config = &property->config[i];
+               pos = &config->pos;
+               sz = &config->sz;
+
+               /* check for flip */
+               if (!gsc_check_drm_flip(config->flip)) {
+                       DRM_ERROR("invalid flip.\n");
+                       goto err_property;
+               }
+
+               /* check for degree */
+               switch (config->degree) {
+               case EXYNOS_DRM_DEGREE_90:
+               case EXYNOS_DRM_DEGREE_270:
+                       swap = true;
+                       break;
+               case EXYNOS_DRM_DEGREE_0:
+               case EXYNOS_DRM_DEGREE_180:
+                       swap = false;
+                       break;
+               default:
+                       DRM_ERROR("invalid degree.\n");
+                       goto err_property;
+               }
+
+               /* check for buffer bound */
+               if ((pos->x + pos->w > sz->hsize) ||
+                       (pos->y + pos->h > sz->vsize)) {
+                       DRM_ERROR("out of buf bound.\n");
+                       goto err_property;
+               }
+
+               /* check for crop: min limits apply to the crop rectangle,
+                * max limits to the full buffer; swapped under rotation. */
+               if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+                       if (swap) {
+                               if ((pos->h < pp->crop_min.hsize) ||
+                                       (sz->vsize > pp->crop_max.hsize) ||
+                                       (pos->w < pp->crop_min.vsize) ||
+                                       (sz->hsize > pp->crop_max.vsize)) {
+                                       DRM_ERROR("out of crop size.\n");
+                                       goto err_property;
+                               }
+                       } else {
+                               if ((pos->w < pp->crop_min.hsize) ||
+                                       (sz->hsize > pp->crop_max.hsize) ||
+                                       (pos->h < pp->crop_min.vsize) ||
+                                       (sz->vsize > pp->crop_max.vsize)) {
+                                       DRM_ERROR("out of crop size.\n");
+                                       goto err_property;
+                               }
+                       }
+               }
+
+               /* check for scale: same scheme as crop, on the dst side. */
+               if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+                       if (swap) {
+                               if ((pos->h < pp->scale_min.hsize) ||
+                                       (sz->vsize > pp->scale_max.hsize) ||
+                                       (pos->w < pp->scale_min.vsize) ||
+                                       (sz->hsize > pp->scale_max.vsize)) {
+                                       DRM_ERROR("out of scale size.\n");
+                                       goto err_property;
+                               }
+                       } else {
+                               if ((pos->w < pp->scale_min.hsize) ||
+                                       (sz->hsize > pp->scale_max.hsize) ||
+                                       (pos->h < pp->scale_min.vsize) ||
+                                       (sz->vsize > pp->scale_max.vsize)) {
+                                       DRM_ERROR("out of scale size.\n");
+                                       goto err_property;
+                               }
+                       }
+               }
+       }
+
+       return 0;
+
+err_property:
+       /* Dump every configuration to aid debugging the rejection. */
+       for_each_ipp_ops(i) {
+               if ((i == EXYNOS_DRM_OPS_SRC) &&
+                       (property->cmd == IPP_CMD_WB))
+                       continue;
+
+               config = &property->config[i];
+               pos = &config->pos;
+               sz = &config->sz;
+
+               DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+                       i ? "dst" : "src", config->flip, config->degree,
+                       pos->x, pos->y, pos->w, pos->h,
+                       sz->hsize, sz->vsize);
+       }
+
+       return -EINVAL;
+}
+
+
+/*
+ * gsc_ippdrv_reset - software-reset the GScaler block and reinitialize
+ * the cached scaler state (wide range by default).
+ * Returns 0 or the error from gsc_sw_reset().
+ */
+static int gsc_ippdrv_reset(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct gsc_scaler *sc = &ctx->sc;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* reset h/w block */
+       ret = gsc_sw_reset(ctx);
+       if (ret < 0) {
+               dev_err(dev, "failed to reset hardware.\n");
+               return ret;
+       }
+
+       /* scaler setting: clear cached state, then default to wide range. */
+       memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+       sc->range = true;
+
+       return 0;
+}
+
+/*
+ * gsc_ippdrv_start - configure the in/out data paths for the requested
+ * command (M2M, writeback, or output), program the pre-scaler and main
+ * scaler, and kick the engine.
+ * Returns 0 on success or a negative error code.
+ */
+static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+       struct drm_exynos_ipp_config *config;
+       struct drm_exynos_pos   img_pos[EXYNOS_DRM_OPS_MAX];
+       struct drm_exynos_ipp_set_wb set_wb;
+       u32 cfg;
+       int ret, i;
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EINVAL;
+       }
+
+       property = &c_node->property;
+       /* NOTE(review): address of an embedded member is never NULL; this
+        * check is dead code — candidate for removal. */
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EINVAL;
+       }
+
+       gsc_handle_irq(ctx, true, false, true);
+
+       for_each_ipp_ops(i) {
+               config = &property->config[i];
+               img_pos[i] = config->pos;
+       }
+
+       switch (cmd) {
+       case IPP_CMD_M2M:
+               /* enable one shot */
+               cfg = gsc_read(GSC_ENABLE);
+               cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
+                       GSC_ENABLE_CLK_GATE_MODE_MASK);
+               cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
+               gsc_write(cfg, GSC_ENABLE);
+
+               /* src dma memory */
+               cfg = gsc_read(GSC_IN_CON);
+               cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+               cfg |= GSC_IN_PATH_MEMORY;
+               gsc_write(cfg, GSC_IN_CON);
+
+               /* dst dma memory */
+               cfg = gsc_read(GSC_OUT_CON);
+               cfg |= GSC_OUT_PATH_MEMORY;
+               gsc_write(cfg, GSC_OUT_CON);
+               break;
+       case IPP_CMD_WB:
+               set_wb.enable = 1;
+               set_wb.refresh = property->refresh_rate;
+               gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+               exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+
+               /* src local path — GSC_IN_CON is a register *offset*, so it
+                * must go through gsc_read() (was a bare readl(), which
+                * dereferences the offset as an address). */
+               cfg = gsc_read(GSC_IN_CON);
+               cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+               cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
+               gsc_write(cfg, GSC_IN_CON);
+
+               /* dst dma memory */
+               cfg = gsc_read(GSC_OUT_CON);
+               cfg |= GSC_OUT_PATH_MEMORY;
+               gsc_write(cfg, GSC_OUT_CON);
+               break;
+       case IPP_CMD_OUTPUT:
+               /* src dma memory */
+               cfg = gsc_read(GSC_IN_CON);
+               cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+               cfg |= GSC_IN_PATH_MEMORY;
+               gsc_write(cfg, GSC_IN_CON);
+
+               /* dst local path
+                * NOTE(review): the comment says "local path" but the code
+                * selects GSC_OUT_PATH_MEMORY, same as the M2M case —
+                * confirm which is intended. */
+               cfg = gsc_read(GSC_OUT_CON);
+               cfg |= GSC_OUT_PATH_MEMORY;
+               gsc_write(cfg, GSC_OUT_CON);
+               break;
+       default:
+               ret = -EINVAL;
+               dev_err(dev, "invalid operations.\n");
+               return ret;
+       }
+
+       ret = gsc_set_prescaler(ctx, &ctx->sc,
+               &img_pos[EXYNOS_DRM_OPS_SRC],
+               &img_pos[EXYNOS_DRM_OPS_DST]);
+       if (ret) {
+               dev_err(dev, "failed to set prescaler.\n");
+               return ret;
+       }
+
+       gsc_set_scaler(ctx, &ctx->sc);
+
+       cfg = gsc_read(GSC_ENABLE);
+       cfg |= GSC_ENABLE_ON;
+       gsc_write(cfg, GSC_ENABLE);
+
+       return 0;
+}
+
+/*
+ * gsc_ippdrv_stop - stop a running command: tear down writeback if
+ * active, disable interrupts, re-mask every destination buffer slot and
+ * clear the engine enable bit.
+ * NOTE(review): IPP_CMD_OUTPUT falls into the error branch — confirm
+ * stopping an output command is genuinely unsupported here.
+ */
+static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+       switch (cmd) {
+       case IPP_CMD_M2M:
+               /* bypass */
+               break;
+       case IPP_CMD_WB:
+               gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+               exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+               break;
+       case IPP_CMD_OUTPUT:
+       default:
+               dev_err(dev, "invalid operations.\n");
+               break;
+       }
+
+       gsc_handle_irq(ctx, false, false, true);
+
+       /* reset sequence: mask out all Y/CB/CR destination slots. */
+       gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
+       gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
+       gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
+
+       cfg = gsc_read(GSC_ENABLE);
+       cfg &= ~GSC_ENABLE_ON;
+       gsc_write(cfg, GSC_ENABLE);
+}
+
+/*
+ * gsc_probe - acquire clock, registers and IRQ, populate the ippdrv ops
+ * and capability list, enable runtime PM and register with the IPP core.
+ * Error paths unwind in strict reverse order of acquisition.
+ */
+static int __devinit gsc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct gsc_context *ctx;
+       struct resource *res;
+       struct exynos_drm_ippdrv *ippdrv;
+       int ret;
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       /* clock control
+        * NOTE(review): plain clk_get() requires the clk_put() done in the
+        * error paths and in gsc_remove(); devm_clk_get() would simplify. */
+       ctx->gsc_clk = clk_get(dev, "gscl");
+       if (IS_ERR(ctx->gsc_clk)) {
+               dev_err(dev, "failed to get gsc clock.\n");
+               ret = PTR_ERR(ctx->gsc_clk);
+               goto err_ctx;
+       }
+
+       /* resource memory */
+       ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!ctx->regs_res) {
+               dev_err(dev, "failed to find registers.\n");
+               ret = -ENOENT;
+               goto err_clk;
+       }
+
+       ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+       if (!ctx->regs) {
+               dev_err(dev, "failed to map registers.\n");
+               ret = -ENXIO;
+               goto err_clk;
+       }
+
+       /* resource irq */
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!res) {
+               dev_err(dev, "failed to request irq resource.\n");
+               ret = -ENOENT;
+               goto err_get_regs;
+       }
+
+       /* Threaded handler only (hard handler NULL), hence IRQF_ONESHOT. */
+       ctx->irq = res->start;
+       ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+               IRQF_ONESHOT, "drm_gsc", ctx);
+       if (ret < 0) {
+               dev_err(dev, "failed to request irq.\n");
+               goto err_get_regs;
+       }
+
+       /* context initailization */
+       ctx->id = pdev->id;
+
+       ippdrv = &ctx->ippdrv;
+       ippdrv->dev = dev;
+       ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
+       ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
+       ippdrv->check_property = gsc_ippdrv_check_property;
+       ippdrv->reset = gsc_ippdrv_reset;
+       ippdrv->start = gsc_ippdrv_start;
+       ippdrv->stop = gsc_ippdrv_stop;
+       ret = gsc_init_prop_list(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to init property list.\n");
+               goto err_get_irq;
+       }
+
+       DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+               (int)ippdrv);
+
+       mutex_init(&ctx->lock);
+       platform_set_drvdata(pdev, ctx);
+
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       ret = exynos_drm_ippdrv_register(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to register drm gsc device.\n");
+               goto err_ippdrv_register;
+       }
+
+       dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+
+       return 0;
+
+       /* NOTE(review): devm_kfree()/devm_iounmap() below are redundant —
+        * devres releases them automatically on probe failure — but
+        * harmless; confirm before simplifying. */
+err_ippdrv_register:
+       devm_kfree(dev, ippdrv->prop_list);
+       pm_runtime_disable(dev);
+err_get_irq:
+       free_irq(ctx->irq, ctx);
+err_get_regs:
+       devm_iounmap(dev, ctx->regs);
+err_clk:
+       clk_put(ctx->gsc_clk);
+err_ctx:
+       devm_kfree(dev, ctx);
+       return ret;
+}
+
+/*
+ * gsc_remove - unregister from the IPP core and release every resource
+ * acquired in gsc_probe(), in reverse order.
+ * NOTE(review): prop_list is freed *before* the ippdrv is unregistered —
+ * verify nothing can still dereference it in that window.
+ */
+static int __devexit gsc_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+       devm_kfree(dev, ippdrv->prop_list);
+       exynos_drm_ippdrv_unregister(ippdrv);
+       mutex_destroy(&ctx->lock);
+
+       pm_runtime_set_suspended(dev);
+       pm_runtime_disable(dev);
+
+       free_irq(ctx->irq, ctx);
+       devm_iounmap(dev, ctx->regs);
+
+       clk_put(ctx->gsc_clk);
+
+       devm_kfree(dev, ctx);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System suspend: gate the clock unless runtime PM already did. */
+static int gsc_suspend(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return gsc_clk_ctrl(ctx, false);
+}
+
+/* System resume: restore the clock only if not runtime-suspended. */
+static int gsc_resume(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return gsc_clk_ctrl(ctx, true);
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime suspend: gate the GScaler clock. */
+static int gsc_runtime_suspend(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       return gsc_clk_ctrl(ctx, false);
+}
+
+/* Runtime resume: ungate the GScaler clock. */
+static int gsc_runtime_resume(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+
+       /* Was __FILE__, which printed the file name where every other
+        * trace in this driver prints the function name. */
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       return gsc_clk_ctrl(ctx, true);
+}
+#endif
+
+/* System sleep and runtime PM callbacks for the GScaler device. */
+static const struct dev_pm_ops gsc_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+       SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+/* Platform driver binding for the exynos-drm-gsc device. */
+struct platform_driver gsc_driver = {
+       .probe          = gsc_probe,
+       .remove         = __devexit_p(gsc_remove),
+       .driver         = {
+               .name   = "exynos-drm-gsc",
+               .owner  = THIS_MODULE,
+               .pm     = &gsc_pm_ops,
+       },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644 (file)
index 0000000..b3c3bc6
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_GSC_H_
+#define _EXYNOS_DRM_GSC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ * Mixer output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_GSC_H_ */
index c3b9e2b..55793c4 100644 (file)
@@ -29,6 +29,9 @@
 #define get_ctx_from_subdrv(subdrv)    container_of(subdrv,\
                                        struct drm_hdmi_context, subdrv);
 
+/* platform device pointer for common drm hdmi device. */
+static struct platform_device *exynos_drm_hdmi_pdev;
+
 /* Common hdmi subdrv needs to access the hdmi and mixer though context.
 * These should be initialied by the repective drivers */
 static struct exynos_drm_hdmi_context *hdmi_ctx;
@@ -46,6 +49,25 @@ struct drm_hdmi_context {
        bool    enabled[MIXER_WIN_NR];
 };
 
+int exynos_platform_device_hdmi_register(void)
+{
+       if (exynos_drm_hdmi_pdev)
+               return -EEXIST;
+
+       exynos_drm_hdmi_pdev = platform_device_register_simple(
+                       "exynos-drm-hdmi", -1, NULL, 0);
+       if (IS_ERR(exynos_drm_hdmi_pdev))
+               return PTR_ERR(exynos_drm_hdmi_pdev);
+
+       return 0;
+}
+
+void exynos_platform_device_hdmi_unregister(void)
+{
+       platform_device_unregister(exynos_drm_hdmi_pdev);
+       exynos_drm_hdmi_pdev = NULL;
+}
+
 void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
 {
        if (ctx)
@@ -157,6 +179,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
                return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
 }
 
+static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (mixer_ops && mixer_ops->wait_for_vblank)
+               mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
+}
+
 static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
                                struct drm_connector *connector,
                                const struct drm_display_mode *mode,
@@ -238,6 +270,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
        .apply = drm_hdmi_apply,
        .enable_vblank = drm_hdmi_enable_vblank,
        .disable_vblank = drm_hdmi_disable_vblank,
+       .wait_for_vblank = drm_hdmi_wait_for_vblank,
        .mode_fixup = drm_hdmi_mode_fixup,
        .mode_set = drm_hdmi_mode_set,
        .get_max_resol = drm_hdmi_get_max_resol,
@@ -291,21 +324,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
        ctx->enabled[win] = false;
 }
 
-static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
-{
-       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
-       DRM_DEBUG_KMS("%s\n", __FILE__);
-
-       if (mixer_ops && mixer_ops->wait_for_vblank)
-               mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
-}
-
 static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
        .mode_set = drm_mixer_mode_set,
        .commit = drm_mixer_commit,
        .disable = drm_mixer_disable,
-       .wait_for_vblank = drm_mixer_wait_for_vblank,
 };
 
 static struct exynos_drm_manager hdmi_manager = {
@@ -346,9 +368,23 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
        ctx->hdmi_ctx->drm_dev = drm_dev;
        ctx->mixer_ctx->drm_dev = drm_dev;
 
+       if (mixer_ops->iommu_on)
+               mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
+
        return 0;
 }
 
+static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+       struct drm_hdmi_context *ctx;
+       struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+
+       ctx = get_ctx_from_subdrv(subdrv);
+
+       if (mixer_ops && mixer_ops->iommu_on)
+               mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
+}
+
 static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -368,6 +404,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
        subdrv->dev = dev;
        subdrv->manager = &hdmi_manager;
        subdrv->probe = hdmi_subdrv_probe;
+       subdrv->remove = hdmi_subdrv_remove;
 
        platform_set_drvdata(pdev, subdrv);
 
index 2da5ffd..fcc3093 100644 (file)
@@ -62,12 +62,13 @@ struct exynos_hdmi_ops {
 
 struct exynos_mixer_ops {
        /* manager */
+       int (*iommu_on)(void *ctx, bool enable);
        int (*enable_vblank)(void *ctx, int pipe);
        void (*disable_vblank)(void *ctx);
+       void (*wait_for_vblank)(void *ctx);
        void (*dpms)(void *ctx, int mode);
 
        /* overlay */
-       void (*wait_for_vblank)(void *ctx);
        void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
        void (*win_commit)(void *ctx, int zpos);
        void (*win_disable)(void *ctx, int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644 (file)
index 0000000..2482b7f
--- /dev/null
@@ -0,0 +1,150 @@
+/* exynos_drm_iommu.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+
+#include <asm/dma-iommu.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * drm_create_iommu_mapping - create a mapping structure
+ *
+ * @drm_dev: DRM device
+ */
+int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+       struct dma_iommu_mapping *mapping = NULL;
+       struct exynos_drm_private *priv = drm_dev->dev_private;
+       struct device *dev = drm_dev->dev;
+
+       if (!priv->da_start)
+               priv->da_start = EXYNOS_DEV_ADDR_START;
+       if (!priv->da_space_size)
+               priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
+       if (!priv->da_space_order)
+               priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
+
+       mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
+                                               priv->da_space_size,
+                                               priv->da_space_order);
+       if (IS_ERR(mapping))
+               return PTR_ERR(mapping);
+
+       dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+                                       GFP_KERNEL);
+       dma_set_max_seg_size(dev, 0xffffffffu);
+       dev->archdata.mapping = mapping;
+
+       return 0;
+}
+
+/*
+ * drm_release_iommu_mapping - release iommu mapping structure
+ *
+ * @drm_dev: DRM device
+ *
+ * if mapping->kref becomes 0 then all things related to iommu mapping
+ * will be released
+ */
+void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+       struct device *dev = drm_dev->dev;
+
+       arm_iommu_release_mapping(dev->archdata.mapping);
+}
+
+/*
+ * drm_iommu_attach_device- attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attach
+ *
+ * This function should be called by sub drivers to attach it to iommu
+ * mapping.
+ */
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+                               struct device *subdrv_dev)
+{
+       struct device *dev = drm_dev->dev;
+       int ret;
+
+       if (!dev->archdata.mapping) {
+               DRM_ERROR("iommu_mapping is null.\n");
+               return -EFAULT;
+       }
+
+       subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
+                                       sizeof(*subdrv_dev->dma_parms),
+                                       GFP_KERNEL);
+       dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
+
+       ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+       if (ret < 0) {
+               DRM_DEBUG_KMS("failed iommu attach.\n");
+               return ret;
+       }
+
+       /*
+        * Set dma_ops to drm_device just one time.
+        *
+        * The dma mapping api needs device object and the api is used
+        * to allocate physial memory and map it with iommu table.
+        * If iommu attach succeeded, the sub driver would have dma_ops
+        * for iommu and also all sub drivers have same dma_ops.
+        */
+       if (!dev->archdata.dma_ops)
+               dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
+
+       return 0;
+}
+
+/*
+ * drm_iommu_detach_device -detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach it from iommu
+ * mapping
+ */
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+                               struct device *subdrv_dev)
+{
+       struct device *dev = drm_dev->dev;
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+       if (!mapping || !mapping->domain)
+               return;
+
+       iommu_detach_device(mapping->domain, subdrv_dev);
+       drm_release_iommu_mapping(drm_dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644 (file)
index 0000000..18a0ca1
--- /dev/null
@@ -0,0 +1,85 @@
+/* exynos_drm_iommu.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IOMMU_H_
+#define _EXYNOS_DRM_IOMMU_H_
+
+#define EXYNOS_DEV_ADDR_START  0x20000000
+#define EXYNOS_DEV_ADDR_SIZE   0x40000000
+#define EXYNOS_DEV_ADDR_ORDER  0x4
+
+#ifdef CONFIG_DRM_EXYNOS_IOMMU
+
+int drm_create_iommu_mapping(struct drm_device *drm_dev);
+
+void drm_release_iommu_mapping(struct drm_device *drm_dev);
+
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+                               struct device *subdrv_dev);
+
+void drm_iommu_detach_device(struct drm_device *dev_dev,
+                               struct device *subdrv_dev);
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+       struct device *dev = drm_dev->dev;
+
+       return dev->archdata.mapping ? true : false;
+#else
+       return false;
+#endif
+}
+
+#else
+
+struct dma_iommu_mapping;
+static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+       return 0;
+}
+
+static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+}
+
+static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
+                                               struct device *subdrv_dev)
+{
+       return 0;
+}
+
+static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
+                                               struct device *subdrv_dev)
+{
+}
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+       return false;
+}
+
+#endif
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644 (file)
index 0000000..49eebe9
--- /dev/null
@@ -0,0 +1,2060 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * IPP stands for Image Post Processing and
+ * supports image scaler/rotator and input/output DMA operations.
+ * using FIMC, GSC, Rotator, so on.
+ * IPP is integration device driver of same attribute h/w
+ */
+
+/*
+ * TODO
+ * 1. expand command control id.
+ * 2. integrate property and config.
+ * 3. removed send_event id check routine.
+ * 4. compare send_event id if needed.
+ * 5. free subdrv_remove notifier callback list if needed.
+ * 6. need to check subdrv_open about multi-open.
+ * 7. need to power_on implement power and sysmmu ctrl.
+ */
+
+#define get_ipp_context(dev)   platform_get_drvdata(to_platform_device(dev))
+#define ipp_is_m2m_cmd(c)      (c == IPP_CMD_M2M)
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+       struct drm_pending_event        base;
+       struct drm_exynos_ipp_event     event;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ * @filp: a pointer to drm_file.
+ */
+struct drm_exynos_ipp_mem_node {
+       struct list_head        list;
+       enum drm_exynos_ops_id  ops_id;
+       u32     prop_id;
+       u32     buf_id;
+       struct drm_exynos_ipp_buf_info  buf_info;
+       struct drm_file         *filp;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: prepare initialization using subdrv.
+ * @ipp_lock: lock for synchronization of access to ipp_idr.
+ * @prop_lock: lock for synchronization of access to prop_idr.
+ * @ipp_idr: ipp driver idr.
+ * @prop_idr: property idr.
+ * @event_workq: event work queue.
+ * @cmd_workq: command work queue.
+ */
+struct ipp_context {
+       struct exynos_drm_subdrv        subdrv;
+       struct mutex    ipp_lock;
+       struct mutex    prop_lock;
+       struct idr      ipp_idr;
+       struct idr      prop_idr;
+       struct workqueue_struct *event_workq;
+       struct workqueue_struct *cmd_workq;
+};
+
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ippdrv)
+               return -EINVAL;
+
+       mutex_lock(&exynos_drm_ippdrv_lock);
+       list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
+       mutex_unlock(&exynos_drm_ippdrv_lock);
+
+       return 0;
+}
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ippdrv)
+               return -EINVAL;
+
+       mutex_lock(&exynos_drm_ippdrv_lock);
+       list_del(&ippdrv->drv_list);
+       mutex_unlock(&exynos_drm_ippdrv_lock);
+
+       return 0;
+}
+
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
+               u32 *idp)
+{
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+again:
+       /* ensure there is space available to allocate a handle */
+       if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("failed to get idr.\n");
+               return -ENOMEM;
+       }
+
+       /* do the allocation under our mutexlock */
+       mutex_lock(lock);
+       ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+       mutex_unlock(lock);
+       if (ret == -EAGAIN)
+               goto again;
+
+       return ret;
+}
+
+static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+       void *obj;
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
+
+       mutex_lock(lock);
+
+       /* find object using handle */
+       obj = idr_find(id_idr, id);
+       if (!obj) {
+               DRM_ERROR("failed to find object.\n");
+               mutex_unlock(lock);
+               return ERR_PTR(-ENODEV);
+       }
+
+       mutex_unlock(lock);
+
+       return obj;
+}
+
+static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
+               enum drm_exynos_ipp_cmd cmd)
+{
+       /*
+        * check dedicated flag and WB, OUTPUT operation with
+        * power on state.
+        */
+       if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
+           !pm_runtime_suspended(ippdrv->dev)))
+               return true;
+
+       return false;
+}
+
+static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
+               struct drm_exynos_ipp_property *property)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+       u32 ipp_id = property->ipp_id;
+
+       DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
+
+       if (ipp_id) {
+               /* find ipp driver using idr */
+               ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+                       ipp_id);
+               if (IS_ERR_OR_NULL(ippdrv)) {
+                       DRM_ERROR("not found ipp%d driver.\n", ipp_id);
+                       return ippdrv;
+               }
+
+               /*
+                * WB, OUTPUT operation not supported multi-operation.
+                * so, make dedicated state at set property ioctl.
+                * when ipp driver finished operations, clear dedicated flags.
+                */
+               if (ipp_check_dedicated(ippdrv, property->cmd)) {
+                       DRM_ERROR("already used choose device.\n");
+                       return ERR_PTR(-EBUSY);
+               }
+
+               /*
+                * This is necessary to find correct device in ipp drivers.
+                * ipp drivers have different abilities,
+                * so need to check property.
+                */
+               if (ippdrv->check_property &&
+                   ippdrv->check_property(ippdrv->dev, property)) {
+                       DRM_ERROR("not support property.\n");
+                       return ERR_PTR(-EINVAL);
+               }
+
+               return ippdrv;
+       } else {
+               /*
+                * This case is search all ipp driver for finding.
+                * user application don't set ipp_id in this case,
+                * so ipp subsystem search correct driver in driver list.
+                */
+               list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+                       if (ipp_check_dedicated(ippdrv, property->cmd)) {
+                               DRM_DEBUG_KMS("%s:used device.\n", __func__);
+                               continue;
+                       }
+
+                       if (ippdrv->check_property &&
+                           ippdrv->check_property(ippdrv->dev, property)) {
+                               DRM_DEBUG_KMS("%s:not support property.\n",
+                                       __func__);
+                               continue;
+                       }
+
+                       return ippdrv;
+               }
+
+               DRM_ERROR("not support ipp driver operations.\n");
+       }
+
+       return ERR_PTR(-ENODEV);
+}
+
+static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+       if (list_empty(&exynos_drm_ippdrv_list)) {
+               DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+               return ERR_PTR(-ENODEV);
+       }
+
+       /*
+        * This case is search ipp driver by prop_id handle.
+        * sometimes, ipp subsystem find driver by prop_id.
+        * e.g. PAUSE state, queue buf, command control.
+        */
+       list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+               DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
+                       count++, (int)ippdrv);
+
+               if (!list_empty(&ippdrv->cmd_list)) {
+                       list_for_each_entry(c_node, &ippdrv->cmd_list, list)
+                               if (c_node->property.prop_id == prop_id)
+                                       return ippdrv;
+               }
+       }
+
+       return ERR_PTR(-ENODEV);
+}
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct device *dev = priv->dev;
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct drm_exynos_ipp_prop_list *prop_list = data;
+       struct exynos_drm_ippdrv *ippdrv;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ctx) {
+               DRM_ERROR("invalid context.\n");
+               return -EINVAL;
+       }
+
+       if (!prop_list) {
+               DRM_ERROR("invalid property parameter.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
+
+       if (!prop_list->ipp_id) {
+               list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+                       count++;
+               /*
+                * Supports ippdrv list count for user application.
+                * First step user application getting ippdrv count.
+                * and second step getting ippdrv capability using ipp_id.
+                */
+               prop_list->count = count;
+       } else {
+               /*
+                * Getting ippdrv capability by ipp_id.
+                * some devices do not support wb, output interface.
+                * so, user application detects the correct ipp driver
+                * using this ioctl.
+                */
+               ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+                                               prop_list->ipp_id);
+               if (IS_ERR_OR_NULL(ippdrv)) {
+                       DRM_ERROR("not found ipp%d driver.\n",
+                                       prop_list->ipp_id);
+                       return -EINVAL;
+               }
+
+               memcpy(prop_list, ippdrv->prop_list, sizeof(*prop_list));
+       }
+
+       return 0;
+}
+
+static void ipp_print_property(struct drm_exynos_ipp_property *property,
+               int idx)
+{
+       struct drm_exynos_ipp_config *config = &property->config[idx];
+       struct drm_exynos_pos *pos = &config->pos;
+       struct drm_exynos_sz *sz = &config->sz;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
+               __func__, property->prop_id, idx ? "dst" : "src", config->fmt);
+
+       DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
+               __func__, pos->x, pos->y, pos->w, pos->h,
+               sz->hsize, sz->vsize, config->flip, config->degree);
+}
+
+static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       u32 prop_id = property->prop_id;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+       ippdrv = ipp_find_drv_by_handle(prop_id);
+       if (IS_ERR_OR_NULL(ippdrv)) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Find command node using command list in ippdrv.
+        * when we find this command node using prop_id.
+        * return property information set in this command node.
+        */
+       list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+               if ((c_node->property.prop_id == prop_id) &&
+                   (c_node->state == IPP_STATE_STOP)) {
+                       DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
+                               __func__, property->cmd, (int)ippdrv);
+
+                       c_node->property = *property;
+                       return 0;
+               }
+       }
+
+       DRM_ERROR("failed to search property.\n");
+
+       return -EINVAL;
+}
+
+static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
+{
+       struct drm_exynos_ipp_cmd_work *cmd_work;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
+       if (!cmd_work) {
+               DRM_ERROR("failed to alloc cmd_work.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
+
+       return cmd_work;
+}
+
+static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
+{
+       struct drm_exynos_ipp_event_work *event_work;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
+       if (!event_work) {
+               DRM_ERROR("failed to alloc event_work.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+
+       return event_work;
+}
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct device *dev = priv->dev;
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct drm_exynos_ipp_property *property = data;
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       int ret, i;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ctx) {
+               DRM_ERROR("invalid context.\n");
+               return -EINVAL;
+       }
+
+       if (!property) {
+               DRM_ERROR("invalid property parameter.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * This is log print for user application property.
+        * user application set various property.
+        */
+       for_each_ipp_ops(i)
+               ipp_print_property(property, i);
+
+       /*
+        * set property ioctl generated new prop_id.
+        * but in this case already assigned prop_id using old set property.
+        * e.g PAUSE state. this case supports find current prop_id and use it
+        * instead of allocation.
+        */
+       if (property->prop_id) {
+               DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+               return ipp_find_and_set_property(property);
+       }
+
+       /* find ipp driver using ipp id */
+       ippdrv = ipp_find_driver(ctx, property);
+       if (IS_ERR_OR_NULL(ippdrv)) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return -EINVAL;
+       }
+
+       /* allocate command node */
+       c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+       if (!c_node) {
+               DRM_ERROR("failed to allocate map node.\n");
+               return -ENOMEM;
+       }
+
+       /* create property id */
+       ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
+               &property->prop_id);
+       if (ret) {
+               DRM_ERROR("failed to create id.\n");
+               goto err_clear;
+       }
+
+       DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
+               __func__, property->prop_id, property->cmd, (int)ippdrv);
+
+       /* stored property information and ippdrv in private data */
+       c_node->priv = priv;
+       c_node->property = *property;
+       c_node->state = IPP_STATE_IDLE;
+
+       c_node->start_work = ipp_create_cmd_work();
+       if (IS_ERR_OR_NULL(c_node->start_work)) {
+               ret = -ENOMEM;
+               goto err_clear;
+       }
+
+       c_node->stop_work = ipp_create_cmd_work();
+       if (IS_ERR_OR_NULL(c_node->stop_work)) {
+               ret = -ENOMEM;
+               goto err_free_start;
+       }
+
+       c_node->event_work = ipp_create_event_work();
+       if (IS_ERR_OR_NULL(c_node->event_work)) {
+               ret = -ENOMEM;
+               goto err_free_stop;
+       }
+
+       mutex_init(&c_node->cmd_lock);
+       mutex_init(&c_node->mem_lock);
+       mutex_init(&c_node->event_lock);
+
+       init_completion(&c_node->start_complete);
+       init_completion(&c_node->stop_complete);
+
+       for_each_ipp_ops(i)
+               INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+       INIT_LIST_HEAD(&c_node->event_list);
+       list_splice_init(&priv->event_list, &c_node->event_list);
+       list_add_tail(&c_node->list, &ippdrv->cmd_list);
+
+       /* make dedicated state without m2m */
+       if (!ipp_is_m2m_cmd(property->cmd))
+               ippdrv->dedicated = true;
+
+       return 0;
+
+err_free_stop:
+       kfree(c_node->stop_work);
+err_free_start:
+       kfree(c_node->start_work);
+err_clear:
+       kfree(c_node);
+       return ret;
+}
+
+/*
+ * ipp_clean_cmd_node - tear down a command node after it has been stopped.
+ *
+ * Unlinks the node from its driver's cmd_list, destroys its locks and
+ * frees the three work items allocated at creation plus the node itself.
+ * NOTE(review): callers must guarantee none of the work items are still
+ * queued/running when this is called — verify against ipp_sched_cmd users.
+ */
+static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* delete list */
+       list_del(&c_node->list);
+
+       /* destroy mutex */
+       mutex_destroy(&c_node->cmd_lock);
+       mutex_destroy(&c_node->mem_lock);
+       mutex_destroy(&c_node->event_lock);
+
+       /* free command node */
+       kfree(c_node->start_work);
+       kfree(c_node->stop_work);
+       kfree(c_node->event_work);
+       kfree(c_node);
+}
+
+/*
+ * ipp_check_mem_list - count queued buffers available for this command.
+ *
+ * Walks the src/dst memory lists under mem_lock and returns the number of
+ * usable buffer pairs: for M2M commands both a source and a destination
+ * are required, so the minimum of the two counts is returned; for
+ * writeback/output commands only one side is used, so the maximum is
+ * returned. Callers treat the result as a boolean "any work queued?".
+ */
+static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+{
+       struct drm_exynos_ipp_property *property = &c_node->property;
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct list_head *head;
+       int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       mutex_lock(&c_node->mem_lock);
+
+       for_each_ipp_ops(i) {
+               /* source/destination memory list */
+               head = &c_node->mem_list[i];
+
+               if (list_empty(head)) {
+                       DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
+                               i ? "dst" : "src");
+                       continue;
+               }
+
+               /* find memory node entry */
+               list_for_each_entry(m_node, head, list) {
+                       DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
+                               i ? "dst" : "src", count[i], (int)m_node);
+                       count[i]++;
+               }
+       }
+
+       DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
+               min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
+               max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
+
+       /*
+        * M2M operations should be need paired memory address.
+        * so, need to check minimum count about src, dst.
+        * other case not use paired memory, so use maximum count
+        */
+       if (ipp_is_m2m_cmd(property->cmd))
+               ret = min(count[EXYNOS_DRM_OPS_SRC],
+                       count[EXYNOS_DRM_OPS_DST]);
+       else
+               ret = max(count[EXYNOS_DRM_OPS_SRC],
+                       count[EXYNOS_DRM_OPS_DST]);
+
+       mutex_unlock(&c_node->mem_lock);
+
+       return ret;
+}
+
+/*
+ * ipp_find_mem_node - look up a queued memory node by buffer id.
+ *
+ * Searches the src or dst list (selected by qbuf->ops_id) for a node whose
+ * buf_id matches qbuf->buf_id. Returns the node or NULL if not found.
+ * NOTE(review): the list is traversed without taking c_node->mem_lock,
+ * unlike the other mem_list users — confirm callers hold it or that this
+ * is a race.
+ */
+static struct drm_exynos_ipp_mem_node
+               *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct list_head *head;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
+
+       /* source/destination memory list */
+       head = &c_node->mem_list[qbuf->ops_id];
+
+       /* find memory node from memory list */
+       list_for_each_entry(m_node, head, list) {
+               DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
+                       __func__, count++, (int)m_node);
+
+               /* compare buffer id */
+               if (m_node->buf_id == qbuf->buf_id)
+                       return m_node;
+       }
+
+       return NULL;
+}
+
+/*
+ * ipp_set_mem_node - program one queued buffer into the hardware.
+ *
+ * Under mem_lock, looks up the src/dst ops for the node and, if the driver
+ * implements set_addr, hands the buffer's dma addresses to the hardware
+ * with IPP_BUF_ENQUEUE. Returns 0 on success, -EFAULT for a NULL node or
+ * missing ops, or the set_addr error code.
+ */
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_mem_node *m_node)
+{
+       struct exynos_drm_ipp_ops *ops = NULL;
+       int ret = 0;
+
+       DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+       if (!m_node) {
+               DRM_ERROR("invalid queue node.\n");
+               return -EFAULT;
+       }
+
+       mutex_lock(&c_node->mem_lock);
+
+       DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+       /* get operations callback */
+       ops = ippdrv->ops[m_node->ops_id];
+       if (!ops) {
+               DRM_ERROR("not support ops.\n");
+               ret = -EFAULT;
+               goto err_unlock;
+       }
+
+       /* set address and enable irq */
+       if (ops->set_addr) {
+               ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
+                       m_node->buf_id, IPP_BUF_ENQUEUE);
+               if (ret) {
+                       DRM_ERROR("failed to set addr.\n");
+                       goto err_unlock;
+               }
+       }
+
+err_unlock:
+       mutex_unlock(&c_node->mem_lock);
+       return ret;
+}
+
+/*
+ * ipp_get_mem_node - build a memory node from a userspace queue request.
+ *
+ * Allocates a node, resolves each non-zero GEM handle in qbuf to a dma
+ * address via exynos_drm_gem_get_dma_addr(), records the addresses and
+ * handles, and appends the node to the matching src/dst list — all under
+ * mem_lock. Returns the node, or ERR_PTR(-EFAULT) on any failure.
+ *
+ * NOTE(review): on the err_clear path, dma addresses already obtained for
+ * earlier planes are not released with exynos_drm_gem_put_dma_addr() —
+ * looks like a reference leak on partial failure; confirm.
+ */
+static struct drm_exynos_ipp_mem_node
+               *ipp_get_mem_node(struct drm_device *drm_dev,
+               struct drm_file *file,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct drm_exynos_ipp_buf_info buf_info;
+       void *addr;
+       int i;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       mutex_lock(&c_node->mem_lock);
+
+       m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+       if (!m_node) {
+               DRM_ERROR("failed to allocate queue node.\n");
+               goto err_unlock;
+       }
+
+       /* clear base address for error handling */
+       memset(&buf_info, 0x0, sizeof(buf_info));
+
+       /* operations, buffer id */
+       m_node->ops_id = qbuf->ops_id;
+       m_node->prop_id = qbuf->prop_id;
+       m_node->buf_id = qbuf->buf_id;
+
+       DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+               (int)m_node, qbuf->ops_id);
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+               qbuf->prop_id, m_node->buf_id);
+
+       for_each_ipp_planar(i) {
+               DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+                       i, qbuf->handle[i]);
+
+               /* get dma address by handle */
+               if (qbuf->handle[i]) {
+                       addr = exynos_drm_gem_get_dma_addr(drm_dev,
+                                       qbuf->handle[i], file);
+                       if (IS_ERR(addr)) {
+                               DRM_ERROR("failed to get addr.\n");
+                               goto err_clear;
+                       }
+
+                       buf_info.handles[i] = qbuf->handle[i];
+                       buf_info.base[i] = *(dma_addr_t *) addr;
+                       DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
+                               __func__, i, buf_info.base[i],
+                               (int)buf_info.handles[i]);
+               }
+       }
+
+       m_node->filp = file;
+       m_node->buf_info = buf_info;
+       list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+
+       mutex_unlock(&c_node->mem_lock);
+       return m_node;
+
+err_clear:
+       kfree(m_node);
+err_unlock:
+       mutex_unlock(&c_node->mem_lock);
+       return ERR_PTR(-EFAULT);
+}
+
+/*
+ * ipp_put_mem_node - release a queued memory node.
+ *
+ * Drops the dma-address reference of every plane that has a handle,
+ * unlinks the node from its queue and frees it, under mem_lock.
+ * NOTE(review): returning -ENOMEM for an already-unlinked node is an odd
+ * errno choice (-EINVAL would read better) — kept as-is since callers
+ * only check for non-zero.
+ */
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_mem_node *m_node)
+{
+       int i;
+
+       DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+       if (!m_node) {
+               DRM_ERROR("invalid dequeue node.\n");
+               return -EFAULT;
+       }
+
+       if (list_empty(&m_node->list)) {
+               DRM_ERROR("empty memory node.\n");
+               return -ENOMEM;
+       }
+
+       mutex_lock(&c_node->mem_lock);
+
+       DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+       /* put gem buffer */
+       for_each_ipp_planar(i) {
+               unsigned long handle = m_node->buf_info.handles[i];
+               if (handle)
+                       exynos_drm_gem_put_dma_addr(drm_dev, handle,
+                                                       m_node->filp);
+       }
+
+       /* delete list in queue */
+       list_del(&m_node->list);
+       kfree(m_node);
+
+       mutex_unlock(&c_node->mem_lock);
+
+       return 0;
+}
+
+/* destroy callback for pending IPP events: the event is simply kfree'd */
+static void ipp_free_event(struct drm_pending_event *event)
+{
+       kfree(event);
+}
+
+/*
+ * ipp_get_event - allocate and queue a completion event for a dst buffer.
+ *
+ * Builds a DRM_EXYNOS_IPP_EVENT carrying the caller's user_data, prop_id
+ * and buf_id, and appends it to the command node's event_list. On
+ * allocation failure the previously reserved event space is refunded to
+ * the file (the reservation itself is presumably made by the DRM core or
+ * an earlier path — not visible here; confirm) and -ENOMEM is returned.
+ */
+static int ipp_get_event(struct drm_device *drm_dev,
+               struct drm_file *file,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_send_event *e;
+       unsigned long flags;
+
+       DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
+               qbuf->ops_id, qbuf->buf_id);
+
+       e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+       if (!e) {
+               DRM_ERROR("failed to allocate event.\n");
+               spin_lock_irqsave(&drm_dev->event_lock, flags);
+               file->event_space += sizeof(e->event);
+               spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+               return -ENOMEM;
+       }
+
+       /* make event */
+       e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+       e->event.base.length = sizeof(e->event);
+       e->event.user_data = qbuf->user_data;
+       e->event.prop_id = qbuf->prop_id;
+       e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
+       e->base.event = &e->event.base;
+       e->base.file_priv = file;
+       e->base.destroy = ipp_free_event;
+       list_add_tail(&e->base.link, &c_node->event_list);
+
+       return 0;
+}
+
+/*
+ * ipp_put_event - discard pending events from a command node.
+ *
+ * With qbuf == NULL (the stop path) every queued event is removed and
+ * freed; with a non-NULL qbuf only the first event whose dst buf_id
+ * matches is removed, then the function returns.
+ */
+static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_send_event *e, *te;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (list_empty(&c_node->event_list)) {
+               DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
+               return;
+       }
+
+       list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
+               DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
+                       __func__, count++, (int)e);
+
+               /*
+                * quf == NULL condition means all event deletion.
+                * stop operations want to delete all event list.
+                * another case delete only same buf id.
+                */
+               if (!qbuf) {
+                       /* delete list */
+                       list_del(&e->base.link);
+                       kfree(e);
+               }
+
+               /* compare buffer id */
+               if (qbuf && (qbuf->buf_id ==
+                   e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
+                       /* delete list */
+                       list_del(&e->base.link);
+                       kfree(e);
+                       return;
+               }
+       }
+}
+
+/*
+ * ipp_handle_cmd_work - dispatch a start/stop command to the IPP workqueue.
+ *
+ * Records the target driver and command node in the work item and queues
+ * it on the context's cmd_workq; ipp_sched_cmd() will pick it up.
+ * NOTE(review): the (struct work_struct *) cast assumes the embedded
+ * work_struct is the first member of drm_exynos_ipp_cmd_work — confirm
+ * against the struct definition.
+ */
+void ipp_handle_cmd_work(struct device *dev,
+               struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_work *cmd_work,
+               struct drm_exynos_ipp_cmd_node *c_node)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       cmd_work->ippdrv = ippdrv;
+       cmd_work->c_node = c_node;
+       queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+}
+
+/*
+ * ipp_queue_buf_with_run - kick processing when a buffer is enqueued.
+ *
+ * If the command node is running (IPP_STATE_START) and has buffers queued:
+ * for M2M commands a PLAY work item is scheduled so the transfer starts
+ * from the workqueue; otherwise (WB/OUTPUT) the buffer address is simply
+ * programmed into the hardware. Returns 0 if there is nothing to do yet
+ * (not started, or no paired memory) — that is deliberate, not an error.
+ */
+static int ipp_queue_buf_with_run(struct device *dev,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_mem_node *m_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_property *property;
+       struct exynos_drm_ipp_ops *ops;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
+       if (IS_ERR_OR_NULL(ippdrv)) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return -EFAULT;
+       }
+
+       ops = ippdrv->ops[qbuf->ops_id];
+       if (!ops) {
+               DRM_ERROR("failed to get ops.\n");
+               return -EFAULT;
+       }
+
+       property = &c_node->property;
+
+       if (c_node->state != IPP_STATE_START) {
+               DRM_DEBUG_KMS("%s:bypass for invalid state.\n" , __func__);
+               return 0;
+       }
+
+       if (!ipp_check_mem_list(c_node)) {
+               DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+               return 0;
+       }
+
+       /*
+        * If set destination buffer and enabled clock,
+        * then m2m operations need start operations at queue_buf
+        */
+       if (ipp_is_m2m_cmd(property->cmd)) {
+               struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
+
+               cmd_work->ctrl = IPP_CTRL_PLAY;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+       } else {
+               ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+               if (ret) {
+                       DRM_ERROR("failed to set m node.\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * ipp_clean_queue_buf - drop queued memory nodes matching a dequeue request.
+ *
+ * Walks the src/dst list selected by qbuf->ops_id and puts every node
+ * whose buf_id and ops_id both match (ipp_put_mem_node unlinks and frees
+ * each one, hence the _safe iterator).
+ */
+static void ipp_clean_queue_buf(struct drm_device *drm_dev,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
+               /* delete list */
+               list_for_each_entry_safe(m_node, tm_node,
+                       &c_node->mem_list[qbuf->ops_id], list) {
+                       if (m_node->buf_id == qbuf->buf_id &&
+                           m_node->ops_id == qbuf->ops_id)
+                               ipp_put_mem_node(drm_dev, c_node, m_node);
+               }
+       }
+}
+
+int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct device *dev = priv->dev;
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct drm_exynos_ipp_queue_buf *qbuf = data;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       struct drm_exynos_ipp_mem_node *m_node;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!qbuf) {
+               DRM_ERROR("invalid buf parameter.\n");
+               return -EINVAL;
+       }
+
+       if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+               DRM_ERROR("invalid ops parameter.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
+               __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
+               qbuf->buf_id, qbuf->buf_type);
+
+       /* find command node */
+       c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+               qbuf->prop_id);
+       if (!c_node) {
+               DRM_ERROR("failed to get command node.\n");
+               return -EFAULT;
+       }
+
+       /* buffer control */
+       switch (qbuf->buf_type) {
+       case IPP_BUF_ENQUEUE:
+               /* get memory node */
+               m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+               if (IS_ERR(m_node)) {
+                       DRM_ERROR("failed to get m_node.\n");
+                       return PTR_ERR(m_node);
+               }
+
+               /*
+                * first step get event for destination buffer.
+                * and second step when M2M case run with destination buffer
+                * if needed.
+                */
+               if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
+                       /* get event for destination buffer */
+                       ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+                       if (ret) {
+                               DRM_ERROR("failed to get event.\n");
+                               goto err_clean_node;
+                       }
+
+                       /*
+                        * M2M case run play control for streaming feature.
+                        * other case set address and waiting.
+                        */
+                       ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
+                       if (ret) {
+                               DRM_ERROR("failed to run command.\n");
+                               goto err_clean_node;
+                       }
+               }
+               break;
+       case IPP_BUF_DEQUEUE:
+               mutex_lock(&c_node->cmd_lock);
+
+               /* put event for destination buffer */
+               if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
+                       ipp_put_event(c_node, qbuf);
+
+               ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+
+               mutex_unlock(&c_node->cmd_lock);
+               break;
+       default:
+               DRM_ERROR("invalid buffer control.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+
+err_clean_node:
+       DRM_ERROR("clean memory nodes.\n");
+
+       ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+       return ret;
+}
+
+/*
+ * exynos_drm_ipp_check_valid - validate a ctrl request against node state.
+ *
+ * Enforces the state machine: PLAY only from IDLE, STOP only when not
+ * already stopped, PAUSE only from START, RESUME only from STOP. All
+ * controls except PLAY additionally require the device to be runtime-
+ * active. Returns true when the transition is allowed.
+ */
+static bool exynos_drm_ipp_check_valid(struct device *dev,
+               enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (ctrl != IPP_CTRL_PLAY) {
+               if (pm_runtime_suspended(dev)) {
+                       DRM_ERROR("pm:runtime_suspended.\n");
+                       goto err_status;
+               }
+       }
+
+       switch (ctrl) {
+       case IPP_CTRL_PLAY:
+               if (state != IPP_STATE_IDLE)
+                       goto err_status;
+               break;
+       case IPP_CTRL_STOP:
+               if (state == IPP_STATE_STOP)
+                       goto err_status;
+               break;
+       case IPP_CTRL_PAUSE:
+               if (state != IPP_STATE_START)
+                       goto err_status;
+               break;
+       case IPP_CTRL_RESUME:
+               if (state != IPP_STATE_STOP)
+                       goto err_status;
+               break;
+       default:
+               DRM_ERROR("invalid state.\n");
+               goto err_status;
+               break;
+       }
+
+       return true;
+
+err_status:
+       DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
+       return false;
+}
+
+int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct exynos_drm_ippdrv *ippdrv = NULL;
+       struct device *dev = priv->dev;
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
+       struct drm_exynos_ipp_cmd_work *cmd_work;
+       struct drm_exynos_ipp_cmd_node *c_node;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ctx) {
+               DRM_ERROR("invalid context.\n");
+               return -EINVAL;
+       }
+
+       if (!cmd_ctrl) {
+               DRM_ERROR("invalid control parameter.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
+               cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+       ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
+       if (IS_ERR(ippdrv)) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return PTR_ERR(ippdrv);
+       }
+
+       c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+               cmd_ctrl->prop_id);
+       if (!c_node) {
+               DRM_ERROR("invalid command node list.\n");
+               return -EINVAL;
+       }
+
+       if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
+           c_node->state)) {
+               DRM_ERROR("invalid state.\n");
+               return -EINVAL;
+       }
+
+       switch (cmd_ctrl->ctrl) {
+       case IPP_CTRL_PLAY:
+               if (pm_runtime_suspended(ippdrv->dev))
+                       pm_runtime_get_sync(ippdrv->dev);
+               c_node->state = IPP_STATE_START;
+
+               cmd_work = c_node->start_work;
+               cmd_work->ctrl = cmd_ctrl->ctrl;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+               c_node->state = IPP_STATE_START;
+               break;
+       case IPP_CTRL_STOP:
+               cmd_work = c_node->stop_work;
+               cmd_work->ctrl = cmd_ctrl->ctrl;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+               if (!wait_for_completion_timeout(&c_node->stop_complete,
+                   msecs_to_jiffies(300))) {
+                       DRM_ERROR("timeout stop:prop_id[%d]\n",
+                               c_node->property.prop_id);
+               }
+
+               c_node->state = IPP_STATE_STOP;
+               ippdrv->dedicated = false;
+               ipp_clean_cmd_node(c_node);
+
+               if (list_empty(&ippdrv->cmd_list))
+                       pm_runtime_put_sync(ippdrv->dev);
+               break;
+       case IPP_CTRL_PAUSE:
+               cmd_work = c_node->stop_work;
+               cmd_work->ctrl = cmd_ctrl->ctrl;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+               if (!wait_for_completion_timeout(&c_node->stop_complete,
+                   msecs_to_jiffies(200))) {
+                       DRM_ERROR("timeout stop:prop_id[%d]\n",
+                               c_node->property.prop_id);
+               }
+
+               c_node->state = IPP_STATE_STOP;
+               break;
+       case IPP_CTRL_RESUME:
+               c_node->state = IPP_STATE_START;
+               cmd_work = c_node->start_work;
+               cmd_work->ctrl = cmd_ctrl->ctrl;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+               break;
+       default:
+               DRM_ERROR("could not support this state currently.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
+               cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+       return 0;
+}
+
+/* register a notifier on the global IPP blocking notifier chain */
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_register(
+               &exynos_drm_ippnb_list, nb);
+}
+
+/* remove a notifier from the global IPP blocking notifier chain */
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_unregister(
+               &exynos_drm_ippnb_list, nb);
+}
+
+/* broadcast an event to all listeners on the IPP notifier chain */
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+       return blocking_notifier_call_chain(
+               &exynos_drm_ippnb_list, val, v);
+}
+
+/*
+ * ipp_set_property - program a property's configuration into the driver.
+ *
+ * Resets the hardware block if the driver provides a reset hook, then for
+ * each of src and dst applies the format, transform (rotation/flip — may
+ * set 'swap' when width/height are exchanged) and size via the driver's
+ * ops. Returns 0 on success or the first failing op's error.
+ */
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_property *property)
+{
+       struct exynos_drm_ipp_ops *ops = NULL;
+       bool swap = false;
+       int ret, i;
+
+       if (!property) {
+               DRM_ERROR("invalid property parameter.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+       /* reset h/w block */
+       if (ippdrv->reset &&
+           ippdrv->reset(ippdrv->dev)) {
+               DRM_ERROR("failed to reset.\n");
+               return -EINVAL;
+       }
+
+       /* set source,destination operations */
+       for_each_ipp_ops(i) {
+               struct drm_exynos_ipp_config *config =
+                       &property->config[i];
+
+               ops = ippdrv->ops[i];
+               if (!ops || !config) {
+                       DRM_ERROR("not support ops and config.\n");
+                       return -EINVAL;
+               }
+
+               /* set format */
+               if (ops->set_fmt) {
+                       ret = ops->set_fmt(ippdrv->dev, config->fmt);
+                       if (ret) {
+                               DRM_ERROR("not support format.\n");
+                               return ret;
+                       }
+               }
+
+               /* set transform for rotation, flip */
+               if (ops->set_transf) {
+                       ret = ops->set_transf(ippdrv->dev, config->degree,
+                               config->flip, &swap);
+                       if (ret) {
+                               DRM_ERROR("not support tranf.\n");
+                               return -EINVAL;
+                       }
+               }
+
+               /* set size */
+               if (ops->set_size) {
+                       ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+                               &config->sz);
+                       if (ret) {
+                               DRM_ERROR("not support size.\n");
+                               return ret;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * ipp_start_property - configure the driver and start a command.
+ *
+ * Binds the node to the driver, verifies buffers are queued, applies the
+ * property via ipp_set_property(), programs the queued buffer addresses
+ * according to the command type (M2M: first src+dst pair; WB: all dst;
+ * OUTPUT: all src) and finally invokes the driver's start hook.
+ *
+ * NOTE(review): list_first_entry() never returns NULL, so the !m_node
+ * check in the M2M branch is dead code — a list_empty() check would be
+ * needed to guard an empty list (ipp_check_mem_list() above only ensures
+ * a non-zero max across both lists for non-M2M; for M2M it is min, so
+ * this path is probably safe in practice — confirm).
+ */
+static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_node *c_node)
+{
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct drm_exynos_ipp_property *property = &c_node->property;
+       struct list_head *head;
+       int ret, i;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+       /* store command info in ippdrv */
+       ippdrv->cmd = c_node;
+
+       if (!ipp_check_mem_list(c_node)) {
+               DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+               return -ENOMEM;
+       }
+
+       /* set current property in ippdrv */
+       ret = ipp_set_property(ippdrv, property);
+       if (ret) {
+               DRM_ERROR("failed to set property.\n");
+               ippdrv->cmd = NULL;
+               return ret;
+       }
+
+       /* check command */
+       switch (property->cmd) {
+       case IPP_CMD_M2M:
+               for_each_ipp_ops(i) {
+                       /* source/destination memory list */
+                       head = &c_node->mem_list[i];
+
+                       m_node = list_first_entry(head,
+                               struct drm_exynos_ipp_mem_node, list);
+                       if (!m_node) {
+                               DRM_ERROR("failed to get node.\n");
+                               ret = -EFAULT;
+                               return ret;
+                       }
+
+                       DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
+                               __func__, (int)m_node);
+
+                       ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to set m node.\n");
+                               return ret;
+                       }
+               }
+               break;
+       case IPP_CMD_WB:
+               /* destination memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+               list_for_each_entry(m_node, head, list) {
+                       ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to set m node.\n");
+                               return ret;
+                       }
+               }
+               break;
+       case IPP_CMD_OUTPUT:
+               /* source memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+               list_for_each_entry(m_node, head, list) {
+                       ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to set m node.\n");
+                               return ret;
+                       }
+               }
+               break;
+       default:
+               DRM_ERROR("invalid operations.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
+
+       /* start operations */
+       if (ippdrv->start) {
+               ret = ippdrv->start(ippdrv->dev, property->cmd);
+               if (ret) {
+                       DRM_ERROR("failed to start ops.\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * ipp_stop_property - stop a running command and release its buffers.
+ *
+ * Discards all pending events, then puts every queued memory node for
+ * the lists relevant to the command type (M2M: both; WB: dst; OUTPUT:
+ * src). The err_clear label is reached on the success path too, by
+ * design: the driver's stop hook must run whether or not the buffer
+ * release succeeded. Returns 0 or the first put/validation error.
+ */
+static int ipp_stop_property(struct drm_device *drm_dev,
+               struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_node *c_node)
+{
+       struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+       struct drm_exynos_ipp_property *property = &c_node->property;
+       struct list_head *head;
+       int ret = 0, i;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+       /* put event */
+       ipp_put_event(c_node, NULL);
+
+       /* check command */
+       switch (property->cmd) {
+       case IPP_CMD_M2M:
+               for_each_ipp_ops(i) {
+                       /* source/destination memory list */
+                       head = &c_node->mem_list[i];
+
+                       if (list_empty(head)) {
+                               DRM_DEBUG_KMS("%s:mem_list is empty.\n",
+                                       __func__);
+                               break;
+                       }
+
+                       list_for_each_entry_safe(m_node, tm_node,
+                               head, list) {
+                               ret = ipp_put_mem_node(drm_dev, c_node,
+                                       m_node);
+                               if (ret) {
+                                       DRM_ERROR("failed to put m_node.\n");
+                                       goto err_clear;
+                               }
+                       }
+               }
+               break;
+       case IPP_CMD_WB:
+               /* destination memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+               if (list_empty(head)) {
+                       DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+                       break;
+               }
+
+               list_for_each_entry_safe(m_node, tm_node, head, list) {
+                       ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to put m_node.\n");
+                               goto err_clear;
+                       }
+               }
+               break;
+       case IPP_CMD_OUTPUT:
+               /* source memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+               if (list_empty(head)) {
+                       DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+                       break;
+               }
+
+               list_for_each_entry_safe(m_node, tm_node, head, list) {
+                       ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to put m_node.\n");
+                               goto err_clear;
+                       }
+               }
+               break;
+       default:
+               DRM_ERROR("invalid operations.\n");
+               ret = -EINVAL;
+               goto err_clear;
+       }
+
+err_clear:
+       /* stop operations */
+       if (ippdrv->stop)
+               ippdrv->stop(ippdrv->dev, property->cmd);
+
+       return ret;
+}
+
+/*
+ * ipp_sched_cmd - workqueue handler executing queued start/stop commands.
+ *
+ * Runs under c_node->cmd_lock. PLAY/RESUME start the property and, for
+ * M2M, wait up to 200ms for the transfer to complete; STOP/PAUSE stop
+ * the property and signal stop_complete so the ioctl path can proceed.
+ * The work item is cast back to drm_exynos_ipp_cmd_work — same layout
+ * assumption as ipp_handle_cmd_work().
+ *
+ * NOTE(review): `property` is the address of an embedded struct member
+ * and can never be NULL, so that check is dead code.
+ */
+void ipp_sched_cmd(struct work_struct *work)
+{
+       struct drm_exynos_ipp_cmd_work *cmd_work =
+               (struct drm_exynos_ipp_cmd_work *)work;
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       struct drm_exynos_ipp_property *property;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       ippdrv = cmd_work->ippdrv;
+       if (!ippdrv) {
+               DRM_ERROR("invalid ippdrv list.\n");
+               return;
+       }
+
+       c_node = cmd_work->c_node;
+       if (!c_node) {
+               DRM_ERROR("invalid command node list.\n");
+               return;
+       }
+
+       mutex_lock(&c_node->cmd_lock);
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property:prop_id[%d]\n",
+                       c_node->property.prop_id);
+               goto err_unlock;
+       }
+
+       switch (cmd_work->ctrl) {
+       case IPP_CTRL_PLAY:
+       case IPP_CTRL_RESUME:
+               ret = ipp_start_property(ippdrv, c_node);
+               if (ret) {
+                       DRM_ERROR("failed to start property:prop_id[%d]\n",
+                               c_node->property.prop_id);
+                       goto err_unlock;
+               }
+
+               /*
+                * M2M case supports wait_completion of transfer.
+                * because M2M case supports single unit operation
+                * with multiple queue.
+                * M2M need to wait completion of data transfer.
+                */
+               if (ipp_is_m2m_cmd(property->cmd)) {
+                       if (!wait_for_completion_timeout
+                           (&c_node->start_complete, msecs_to_jiffies(200))) {
+                               DRM_ERROR("timeout event:prop_id[%d]\n",
+                                       c_node->property.prop_id);
+                               goto err_unlock;
+                       }
+               }
+               break;
+       case IPP_CTRL_STOP:
+       case IPP_CTRL_PAUSE:
+               ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
+                       c_node);
+               if (ret) {
+                       DRM_ERROR("failed to stop property.\n");
+                       goto err_unlock;
+               }
+
+               complete(&c_node->stop_complete);
+               break;
+       default:
+               DRM_ERROR("unknown control type\n");
+               break;
+       }
+
+       DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
+
+err_unlock:
+       mutex_unlock(&c_node->cmd_lock);
+}
+
+/*
+ * ipp_send_event - dequeue the buffers that just finished for the
+ * current command and post a completion event to the owning DRM file.
+ *
+ * @ippdrv: driver whose hardware finished the transfer.
+ * @c_node: command node holding the memory and event lists.
+ * @buf_id: per-ops (src/dst) buffer ids reported by the irq handler.
+ *
+ * Returns 0 on success (including "nothing to signal"), negative errno
+ * on failure.
+ */
+static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
+{
+       struct drm_device *drm_dev = ippdrv->drm_dev;
+       struct drm_exynos_ipp_property *property = &c_node->property;
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct drm_exynos_ipp_queue_buf qbuf;
+       struct drm_exynos_ipp_send_event *e;
+       struct list_head *head;
+       struct timeval now;
+       unsigned long flags;
+       u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+       int ret, i;
+
+       for_each_ipp_ops(i)
+               DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+                       i ? "dst" : "src", buf_id[i]);
+
+       if (!drm_dev) {
+               DRM_ERROR("failed to get drm_dev.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * NOTE(review): 'property' is the address of an embedded member
+        * and can never be NULL -- this check is dead code.
+        */
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EINVAL;
+       }
+
+       /* No one is waiting for an event on this command node. */
+       if (list_empty(&c_node->event_list)) {
+               DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
+               return 0;
+       }
+
+       if (!ipp_check_mem_list(c_node)) {
+               DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+               return 0;
+       }
+
+       /* check command */
+       switch (property->cmd) {
+       case IPP_CMD_M2M:
+               for_each_ipp_ops(i) {
+                       /* source/destination memory list */
+                       head = &c_node->mem_list[i];
+
+                       /*
+                        * NOTE(review): list_first_entry() never returns
+                        * NULL, so the checks below are dead code
+                        * (emptiness was verified by ipp_check_mem_list()).
+                        */
+                       m_node = list_first_entry(head,
+                               struct drm_exynos_ipp_mem_node, list);
+                       if (!m_node) {
+                               DRM_ERROR("empty memory node.\n");
+                               return -ENOMEM;
+                       }
+
+                       tbuf_id[i] = m_node->buf_id;
+                       DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+                               i ? "dst" : "src", tbuf_id[i]);
+
+                       ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+                       if (ret)
+                               DRM_ERROR("failed to put m_node.\n");
+               }
+               break;
+       case IPP_CMD_WB:
+               /* clear buf for finding */
+               memset(&qbuf, 0x0, sizeof(qbuf));
+               qbuf.ops_id = EXYNOS_DRM_OPS_DST;
+               qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
+
+               /* get memory node entry */
+               m_node = ipp_find_mem_node(c_node, &qbuf);
+               if (!m_node) {
+                       DRM_ERROR("empty memory node.\n");
+                       return -ENOMEM;
+               }
+
+               tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+               ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+               if (ret)
+                       DRM_ERROR("failed to put m_node.\n");
+               break;
+       case IPP_CMD_OUTPUT:
+               /* source memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+               /* NOTE(review): dead NULL check, as above. */
+               m_node = list_first_entry(head,
+                       struct drm_exynos_ipp_mem_node, list);
+               if (!m_node) {
+                       DRM_ERROR("empty memory node.\n");
+                       return -ENOMEM;
+               }
+
+               tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+               ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+               if (ret)
+                       DRM_ERROR("failed to put m_node.\n");
+               break;
+       default:
+               DRM_ERROR("invalid operations.\n");
+               return -EINVAL;
+       }
+
+       /* Buffers are expected to complete in queue order. */
+       if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
+               DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
+                       tbuf_id[1], buf_id[1], property->prop_id);
+
+       /*
+        * The command node keeps one event per enqueued destination
+        * buffer, appended at the tail; the first event therefore
+        * corresponds to the first enqueued buffer.
+        */
+       e = list_first_entry(&c_node->event_list,
+               struct drm_exynos_ipp_send_event, base.link);
+
+       /* NOTE(review): dead check -- list_first_entry() never returns NULL. */
+       if (!e) {
+               DRM_ERROR("empty event.\n");
+               return -EINVAL;
+       }
+
+       do_gettimeofday(&now);
+       DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
+               , __func__, now.tv_sec, now.tv_usec);
+       e->event.tv_sec = now.tv_sec;
+       e->event.tv_usec = now.tv_usec;
+       e->event.prop_id = property->prop_id;
+
+       /* set buffer id about source destination */
+       for_each_ipp_ops(i)
+               e->event.buf_id[i] = tbuf_id[i];
+
+       /* Hand the event to the file's queue and wake any poller. */
+       spin_lock_irqsave(&drm_dev->event_lock, flags);
+       list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+       wake_up_interruptible(&e->base.file_priv->event_wait);
+       spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+       DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
+               property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
+
+       return 0;
+}
+
+/*
+ * ipp_sched_event - event work handler run on the per-driver event
+ * workqueue.  Sends the completion event for the finished buffers and
+ * completes start_complete for M2M commands so ipp_sched_cmd() can
+ * stop waiting.
+ *
+ * NOTE(review): the cast below assumes 'work' is the first member of
+ * struct drm_exynos_ipp_event_work -- container_of() would be safer.
+ */
+void ipp_sched_event(struct work_struct *work)
+{
+       struct drm_exynos_ipp_event_work *event_work =
+               (struct drm_exynos_ipp_event_work *)work;
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       int ret;
+
+       if (!event_work) {
+               DRM_ERROR("failed to get event_work.\n");
+               return;
+       }
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
+               event_work->buf_id[EXYNOS_DRM_OPS_DST]);
+
+       ippdrv = event_work->ippdrv;
+       if (!ippdrv) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return;
+       }
+
+       c_node = ippdrv->cmd;
+       if (!c_node) {
+               DRM_ERROR("failed to get command node.\n");
+               return;
+       }
+
+       /*
+        * Fix: take the event lock before the state check.  The original
+        * code jumped to err_completion before mutex_lock(), so the
+        * unlock at the end operated on a mutex that was never taken.
+        */
+       mutex_lock(&c_node->event_lock);
+
+       /*
+        * IPP supports command thread, event thread synchronization.
+        * If IPP close immediately from user land, then IPP make
+        * synchronization with command thread, so make complete event.
+        * or going out operations.
+        */
+       if (c_node->state != IPP_STATE_START) {
+               DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
+                       __func__, c_node->state, c_node->property.prop_id);
+               goto err_completion;
+       }
+
+       ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
+       if (ret) {
+               DRM_ERROR("failed to send event.\n");
+               goto err_completion;
+       }
+
+err_completion:
+       if (ipp_is_m2m_cmd(c_node->property.cmd))
+               complete(&c_node->start_complete);
+
+       mutex_unlock(&c_node->event_lock);
+}
+
+/*
+ * ipp_subdrv_probe - DRM sub-driver bind hook.  Registers every ippdrv
+ * on the global list: allocates an idr id, wires the event workqueue
+ * and handler, and attaches the iommu when supported.
+ */
+static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct exynos_drm_ippdrv *ippdrv;
+       int ret, count = 0;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* get ipp driver entry */
+       list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+               ippdrv->drm_dev = drm_dev;
+
+               ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
+                       &ippdrv->ipp_id);
+               if (ret) {
+                       DRM_ERROR("failed to create id.\n");
+                       goto err_idr;
+               }
+
+               DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
+                       count++, (int)ippdrv, ippdrv->ipp_id);
+
+               if (ippdrv->ipp_id == 0) {
+                       DRM_ERROR("failed to get ipp_id[%d]\n",
+                               ippdrv->ipp_id);
+                       /*
+                        * Fix: 'ret' still held 0 from the successful
+                        * ipp_create_id() call above, so this failure
+                        * path used to return success to the caller.
+                        */
+                       ret = -EINVAL;
+                       goto err_idr;
+               }
+
+               /* store parent device for node */
+               ippdrv->parent_dev = dev;
+
+               /* store event work queue and handler */
+               ippdrv->event_workq = ctx->event_workq;
+               ippdrv->sched_event = ipp_sched_event;
+               INIT_LIST_HEAD(&ippdrv->cmd_list);
+
+               if (is_drm_iommu_supported(drm_dev)) {
+                       ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+                       if (ret) {
+                               DRM_ERROR("failed to activate iommu\n");
+                               goto err_iommu;
+                       }
+               }
+       }
+
+       return 0;
+
+err_iommu:
+       /*
+        * NOTE(review): this walks the whole list in reverse, so it also
+        * calls detach for drivers that were never attached -- confirm
+        * drm_iommu_detach_device() tolerates that.
+        */
+       list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+               if (is_drm_iommu_supported(drm_dev))
+                       drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+err_idr:
+       idr_remove_all(&ctx->ipp_idr);
+       idr_remove_all(&ctx->prop_idr);
+       idr_destroy(&ctx->ipp_idr);
+       idr_destroy(&ctx->prop_idr);
+       return ret;
+}
+
+/*
+ * ipp_subdrv_remove - DRM sub-driver unbind hook.  Detaches the iommu
+ * and unregisters every ippdrv on the global list.
+ */
+static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+       struct exynos_drm_ippdrv *ippdrv, *t;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /*
+        * Fix: use the _safe iterator -- exynos_drm_ippdrv_unregister()
+        * presumably unlinks the entry from exynos_drm_ippdrv_list
+        * (TODO confirm), which would invalidate a plain
+        * list_for_each_entry() cursor; _safe is correct either way.
+        */
+       list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
+               if (is_drm_iommu_supported(drm_dev))
+                       drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+               ippdrv->drm_dev = NULL;
+               exynos_drm_ippdrv_unregister(ippdrv);
+       }
+}
+
+/*
+ * ipp_subdrv_open - DRM file-open hook.  Allocates the per-file IPP
+ * context (owning device plus its pending event list) and hangs it off
+ * the file's driver_priv.
+ */
+static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               DRM_ERROR("failed to allocate priv.\n");
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&priv->event_list);
+       priv->dev = dev;
+       file_priv->ipp_priv = priv;
+
+       DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
+
+       return 0;
+}
+
+/*
+ * ipp_subdrv_close - DRM file-close hook.  Stops and cleans up any
+ * command nodes still owned by the closing file, then frees its
+ * per-file IPP context.
+ */
+static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct exynos_drm_ippdrv *ippdrv = NULL;
+       struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
+
+       if (list_empty(&exynos_drm_ippdrv_list)) {
+               DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+               goto err_clear;
+       }
+
+       list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+               if (list_empty(&ippdrv->cmd_list))
+                       continue;
+
+               /* _safe iteration: ipp_clean_cmd_node() presumably
+                * unlinks c_node from cmd_list -- TODO confirm. */
+               list_for_each_entry_safe(c_node, tc_node,
+                       &ippdrv->cmd_list, list) {
+                       DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
+                               __func__, count++, (int)ippdrv);
+
+                       if (c_node->priv == priv) {
+                               /*
+                                * The process died or closed the file
+                                * without issuing a stop command, so
+                                * stop the running operation here on
+                                * its behalf.
+                                */
+                               if (c_node->state == IPP_STATE_START) {
+                                       ipp_stop_property(drm_dev, ippdrv,
+                                               c_node);
+                                       c_node->state = IPP_STATE_STOP;
+                               }
+
+                               ippdrv->dedicated = false;
+                               ipp_clean_cmd_node(c_node);
+                               /* Last command gone: drop runtime PM ref. */
+                               if (list_empty(&ippdrv->cmd_list))
+                                       pm_runtime_put_sync(ippdrv->dev);
+                       }
+               }
+       }
+
+err_clear:
+       kfree(priv);
+       return;
+}
+
+/*
+ * ipp_probe - platform driver probe.  Sets up the idr tables and the
+ * two single-threaded work queues, then registers the IPP sub driver
+ * with the exynos DRM core.
+ */
+static int __devinit ipp_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ipp_context *ctx;
+       struct exynos_drm_subdrv *subdrv;
+       int ret;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       mutex_init(&ctx->ipp_lock);
+       mutex_init(&ctx->prop_lock);
+
+       idr_init(&ctx->ipp_idr);
+       idr_init(&ctx->prop_idr);
+
+       /*
+        * create single thread for ipp event
+        * IPP supports event thread for IPP drivers.
+        * IPP driver send event_work to this thread.
+        * and IPP event thread send event to user process.
+        */
+       ctx->event_workq = create_singlethread_workqueue("ipp_event");
+       if (!ctx->event_workq) {
+               dev_err(dev, "failed to create event workqueue\n");
+               /*
+                * Fix: workqueue creation fails on allocation, not on an
+                * invalid argument -- report -ENOMEM, not -EINVAL.
+                */
+               ret = -ENOMEM;
+               goto err_clear;
+       }
+
+       /*
+        * create single thread for ipp command
+        * IPP supports command thread for user process.
+        * user process make command node using set property ioctl.
+        * and make start_work and send this work to command thread.
+        * and then this command thread start property.
+        */
+       ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
+       if (!ctx->cmd_workq) {
+               dev_err(dev, "failed to create cmd workqueue\n");
+               ret = -ENOMEM;
+               goto err_event_workq;
+       }
+
+       /* set sub driver informations */
+       subdrv = &ctx->subdrv;
+       subdrv->dev = dev;
+       subdrv->probe = ipp_subdrv_probe;
+       subdrv->remove = ipp_subdrv_remove;
+       subdrv->open = ipp_subdrv_open;
+       subdrv->close = ipp_subdrv_close;
+
+       platform_set_drvdata(pdev, ctx);
+
+       ret = exynos_drm_subdrv_register(subdrv);
+       if (ret < 0) {
+               DRM_ERROR("failed to register drm ipp device.\n");
+               goto err_cmd_workq;
+       }
+
+       dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+
+       return 0;
+
+err_cmd_workq:
+       destroy_workqueue(ctx->cmd_workq);
+err_event_workq:
+       destroy_workqueue(ctx->event_workq);
+err_clear:
+       kfree(ctx);
+       return ret;
+}
+
+/*
+ * ipp_remove - platform driver teardown.  Order matters: unregister the
+ * sub driver first so no new callbacks arrive, then release the idr
+ * tables, locks and work queues, and finally free the context.
+ */
+static int __devexit ipp_remove(struct platform_device *pdev)
+{
+       struct ipp_context *ctx = platform_get_drvdata(pdev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* unregister sub driver */
+       exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+       /* remove,destroy ipp idr */
+       idr_remove_all(&ctx->ipp_idr);
+       idr_remove_all(&ctx->prop_idr);
+       idr_destroy(&ctx->ipp_idr);
+       idr_destroy(&ctx->prop_idr);
+
+       mutex_destroy(&ctx->ipp_lock);
+       mutex_destroy(&ctx->prop_lock);
+
+       /* destroy command, event work queue */
+       destroy_workqueue(ctx->cmd_workq);
+       destroy_workqueue(ctx->event_workq);
+
+       kfree(ctx);
+
+       return 0;
+}
+
+/*
+ * ipp_power_ctrl - central power hook used by every PM callback below.
+ * Currently a stub: it only logs and always reports success.
+ */
+static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
+{
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System-sleep suspend: power down unless runtime PM already did. */
+static int ipp_suspend(struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return pm_runtime_suspended(dev) ? 0 : ipp_power_ctrl(ctx, false);
+}
+
+/* System-sleep resume: power up only if runtime PM had us powered. */
+static int ipp_resume(struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return pm_runtime_suspended(dev) ? 0 : ipp_power_ctrl(ctx, true);
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime-PM suspend: delegate straight to the power hook. */
+static int ipp_runtime_suspend(struct device *dev)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return ipp_power_ctrl(get_ipp_context(dev), false);
+}
+
+/* Runtime-PM resume: delegate straight to the power hook. */
+static int ipp_runtime_resume(struct device *dev)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return ipp_power_ctrl(get_ipp_context(dev), true);
+}
+#endif
+
+/* System-sleep and runtime PM hooks; all delegate to ipp_power_ctrl(). */
+static const struct dev_pm_ops ipp_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
+       SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
+};
+
+/* Platform driver glue for the exynos DRM IPP core device. */
+struct platform_driver ipp_driver = {
+       .probe          = ipp_probe,
+       .remove         = __devexit_p(ipp_remove),
+       .driver         = {
+               .name   = "exynos-drm-ipp",
+               .owner  = THIS_MODULE,
+               .pm     = &ipp_pm_ops,
+       },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644 (file)
index 0000000..28ffac9
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
+/* Iterate ops indices: 0 = source, 1 = destination (EXYNOS_DRM_OPS_*). */
+#define for_each_ipp_ops(pos)  \
+       for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
+/* Iterate planar indices (Y, Cb, Cr). */
+#define for_each_ipp_planar(pos)       \
+       for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
+
+/* NOTE(review): these ioctl numbers are not referenced anywhere in the
+ * code visible here -- confirm they are still needed. */
+#define IPP_GET_LCD_WIDTH      _IOR('F', 302, int)
+#define IPP_GET_LCD_HEIGHT     _IOR('F', 303, int)
+#define IPP_SET_WRITEBACK      _IOW('F', 304, u32)
+
+/*
+ * Lifecycle state of a command node: START while a command is running,
+ * STOP once the user (or file close) halted it; IDLE presumably the
+ * initial state -- not set in the code visible here.
+ */
+enum drm_exynos_ipp_state {
+       IPP_STATE_IDLE,
+       IPP_STATE_START,
+       IPP_STATE_STOP,
+};
+
+/*
+ * A structure of command work information.
+ * @work: work structure.  Must stay the first member: ipp_sched_cmd()
+ *        casts the work_struct pointer back to this type.
+ * @ippdrv: current work ippdrv.
+ * @c_node: command node information.
+ * @ctrl: command control.
+ */
+struct drm_exynos_ipp_cmd_work {
+       struct work_struct      work;
+       struct exynos_drm_ippdrv        *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       enum drm_exynos_ipp_ctrl        ctrl;
+};
+
+/*
+ * A structure of command node.
+ *
+ * @priv: IPP private information.
+ * @list: list head to command queue information.
+ * @event_list: list head of event.
+ * @mem_list: list head to source,destination memory queue information.
+ * @cmd_lock: lock for synchronization of access to ioctl.
+ * @mem_lock: lock for synchronization of access to memory nodes.
+ * @event_lock: lock for synchronization of access to scheduled event.
+ * @start_complete: completion of start of command.
+ * @stop_complete: completion of stop of command.
+ * @property: property information.
+ * @start_work: start command work structure.
+ * @stop_work: stop command work structure.
+ * @event_work: event work structure.
+ * @state: state of command node.
+ */
+struct drm_exynos_ipp_cmd_node {
+       struct exynos_drm_ipp_private *priv;
+       struct list_head        list;
+       struct list_head        event_list;
+       struct list_head        mem_list[EXYNOS_DRM_OPS_MAX];
+       struct mutex    cmd_lock;
+       struct mutex    mem_lock;
+       struct mutex    event_lock;
+       struct completion       start_complete;
+       struct completion       stop_complete;
+       struct drm_exynos_ipp_property  property;
+       struct drm_exynos_ipp_cmd_work *start_work;
+       struct drm_exynos_ipp_cmd_work *stop_work;
+       struct drm_exynos_ipp_event_work *event_work;
+       enum drm_exynos_ipp_state       state;
+};
+
+/*
+ * A structure of buffer information.
+ *
+ * @handles: per-plane (Y, Cb, Cr) buffer handles -- presumably GEM
+ *           handles; the old doc named a nonexistent @gem_objs member.
+ * @base: Y, Cb, Cr each planar DMA address.
+ */
+struct drm_exynos_ipp_buf_info {
+       unsigned long   handles[EXYNOS_DRM_PLANAR_MAX];
+       dma_addr_t      base[EXYNOS_DRM_PLANAR_MAX];
+};
+
+/*
+ * A structure of writeback setting information.
+ *
+ * @enable: enable flag for wb.
+ * @refresh: HZ of the refresh rate.
+ */
+struct drm_exynos_ipp_set_wb {
+       __u32   enable;
+       __u32   refresh;
+};
+
+/*
+ * A structure of event work information.
+ *
+ * @work: work structure.  Must stay the first member: ipp_sched_event()
+ *        casts the work_struct pointer back to this type.
+ * @ippdrv: current work ippdrv.
+ * @buf_id: id of src, dst buffer.
+ */
+struct drm_exynos_ipp_event_work {
+       struct work_struct      work;
+       struct exynos_drm_ippdrv *ippdrv;
+       u32     buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
+/*
+ * A structure of source,destination operations.
+ * All hooks appear to return 0 on success / negative errno -- TODO
+ * confirm against the implementing drivers.
+ *
+ * @set_fmt: set format of image.
+ * @set_transf: set transform(rotations, flip).
+ * @set_size: set size of region.
+ * @set_addr: set address for dma.
+ */
+struct exynos_drm_ipp_ops {
+       int (*set_fmt)(struct device *dev, u32 fmt);
+       int (*set_transf)(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap);
+       int (*set_size)(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
+       int (*set_addr)(struct device *dev,
+                struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type);
+};
+
+/*
+ * A structure of ipp driver.
+ *
+ * @drv_list: list head for registered sub driver information.
+ * @parent_dev: parent device information.
+ * @dev: platform device.
+ * @drm_dev: drm device.
+ * @ipp_id: id of ipp driver.
+ * @dedicated: dedicated ipp device.
+ * @ops: source, destination operations.
+ * @event_workq: event work queue.
+ * @cmd: current command information.
+ * @cmd_list: list head for command information.
+ * @prop_list: property information of the current ipp driver.
+ * @check_property: check property about format, size, buffer.
+ * @reset: reset ipp block.
+ * @start: ipp each device start.
+ * @stop: ipp each device stop.
+ * @sched_event: work schedule handler.
+ */
+struct exynos_drm_ippdrv {
+       struct list_head        drv_list;
+       struct device   *parent_dev;
+       struct device   *dev;
+       struct drm_device       *drm_dev;
+       u32     ipp_id;
+       bool    dedicated;
+       struct exynos_drm_ipp_ops       *ops[EXYNOS_DRM_OPS_MAX];
+       struct workqueue_struct *event_workq;
+       struct drm_exynos_ipp_cmd_node *cmd;
+       struct list_head        cmd_list;
+       struct drm_exynos_ipp_prop_list *prop_list;
+
+       int (*check_property)(struct device *dev,
+               struct drm_exynos_ipp_property *property);
+       int (*reset)(struct device *dev);
+       int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+       void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+       void (*sched_event)(struct work_struct *work);
+};
+
+/* Public IPP API; real implementations when CONFIG_DRM_EXYNOS_IPP. */
+#ifdef CONFIG_DRM_EXYNOS_IPP
+extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+                                        struct drm_file *file);
+extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+                                        struct drm_file *file);
+extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+                                        struct drm_file *file);
+extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+                                        struct drm_file *file);
+extern int exynos_drm_ippnb_register(struct notifier_block *nb);
+extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
+extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
+extern void ipp_sched_cmd(struct work_struct *work);
+extern void ipp_sched_event(struct work_struct *work);
+
+#else
+/* IPP disabled: registration stubs report -ENODEV, ioctls -ENOTTY. */
+static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+       return -ENODEV;
+}
+
+static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+       return -ENODEV;
+}
+
+static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
+                                               void *data,
+                                               struct drm_file *file_priv)
+{
+       return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
+                                               void *data,
+                                               struct drm_file *file_priv)
+{
+       return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
+                                               void *data,
+                                               struct drm_file *file)
+{
+       return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
+                                               void *data,
+                                               struct drm_file *file)
+{
+       return -ENOTTY;
+}
+
+static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+       return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+       return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+       return -ENOTTY;
+}
+#endif
+
+#endif /* _EXYNOS_DRM_IPP_H_ */
+
index 60b877a..83efc66 100644 (file)
@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
  * CRTC ----------------
  *      ^ start        ^ end
  *
- * There are six cases from a to b.
+ * There are six cases from a to f.
  *
  *             <----- SCREEN ----->
  *             0                 last
@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
                }
 
                overlay->dma_addr[i] = buffer->dma_addr;
-               overlay->vaddr[i] = buffer->kvaddr;
 
-               DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
-                               i, (unsigned long)overlay->vaddr[i],
-                               (unsigned long)overlay->dma_addr[i]);
+               DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
+                               i, (unsigned long)overlay->dma_addr[i]);
        }
 
        actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
        if (crtc_x < 0) {
                if (actual_w)
                        src_x -= crtc_x;
-               else
-                       src_x += crtc_w;
                crtc_x = 0;
        }
 
        if (crtc_y < 0) {
                if (actual_h)
                        src_y -= crtc_y;
-               else
-                       src_y += crtc_h;
                crtc_y = 0;
        }
 
@@ -204,7 +198,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                return ret;
 
        plane->crtc = crtc;
-       plane->fb = crtc->fb;
 
        exynos_plane_commit(plane);
        exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644 (file)
index 0000000..1c23660
--- /dev/null
@@ -0,0 +1,855 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *     YoungJun Cho <yj44.cho@samsung.com>
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-rotator.h"
+#include "exynos_drm.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * Rotator supports image crop/rotation and input/output DMA operations.
+ * input DMA reads image data from the memory.
+ * output DMA writes image data to memory.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> Rotator H/W ----> Memory.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size width, height.
+ * 4. need to add supported list in prop_list.
+ */
+
+#define get_rot_context(dev)   platform_get_drvdata(to_platform_device(dev))
+/*
+ * NOTE(review): trailing semicolon removed - the old definition expanded
+ * to "container_of(...);", which breaks any use of the macro as an
+ * expression (initializer, function argument, right-hand side).
+ */
+#define get_ctx_from_ippdrv(ippdrv)    container_of(ippdrv,\
+                                       struct rot_context, ippdrv)
+/* Register accessors; rely on a local "rot" context being in scope */
+#define rot_read(offset)               readl(rot->regs + (offset))
+#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
+
+/* Interrupt status bit positions reported via ROT_STATUS */
+enum rot_irq_status {
+       ROT_IRQ_STATUS_COMPLETE = 8,
+       ROT_IRQ_STATUS_ILLEGAL  = 9,
+};
+
+/*
+ * A structure of limitation.
+ *
+ * @min_w: minimum width.
+ * @min_h: minimum height.
+ * @max_w: maximum width.
+ * @max_h: maximum height.
+ * @align: align size, as log2 of the alignment in pixels
+ *         (used as "1 << align" when building the rounding mask).
+ */
+struct rot_limit {
+       u32     min_w;
+       u32     min_h;
+       u32     max_w;
+       u32     max_h;
+       u32     align;
+};
+
+/*
+ * A structure of limitation table.
+ *
+ * @ycbcr420_2p: case of YUV (maps to DRM_FORMAT_NV12, two-plane).
+ * @rgb888: case of RGB (maps to DRM_FORMAT_XRGB8888).
+ */
+struct rot_limit_table {
+       struct rot_limit        ycbcr420_2p;
+       struct rot_limit        rgb888;
+};
+
+/*
+ * A structure of rotator context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @clock: rotator gate clock.
+ * @limit_tbl: limitation of rotator (per-format min/max/alignment).
+ * @irq: irq number.
+ * @cur_buf_id: current operation buffer id, indexed by
+ *              EXYNOS_DRM_OPS_SRC / EXYNOS_DRM_OPS_DST.
+ * @suspended: suspended state (set when the gate clock is disabled).
+ */
+struct rot_context {
+       struct exynos_drm_ippdrv        ippdrv;
+       struct resource *regs_res;
+       void __iomem    *regs;
+       struct clk      *clock;
+       struct rot_limit_table  *limit_tbl;
+       int     irq;
+       int     cur_buf_id[EXYNOS_DRM_OPS_MAX];
+       bool    suspended;
+};
+
+/*
+ * Enable or disable the rotator interrupt via a read-modify-write of
+ * the IRQ enable bit in ROT_CONFIG.
+ */
+static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
+{
+       u32 val = rot_read(ROT_CONFIG);
+
+       /* test the bool directly instead of comparing against "true" */
+       if (enable)
+               val |= ROT_CONFIG_IRQ;
+       else
+               val &= ~ROT_CONFIG_IRQ;
+
+       rot_write(val, ROT_CONFIG);
+}
+
+/* Return the pixel-format bits currently programmed in ROT_CONTROL. */
+static u32 rotator_reg_get_fmt(struct rot_context *rot)
+{
+       return rot_read(ROT_CONTROL) & ROT_CONTROL_FMT_MASK;
+}
+
+/* Decode ROT_STATUS into a completion/illegal interrupt status. */
+static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
+{
+       u32 irq = ROT_STATUS_IRQ(rot_read(ROT_STATUS));
+
+       return (irq == ROT_STATUS_IRQ_VAL_COMPLETE) ?
+               ROT_IRQ_STATUS_COMPLETE : ROT_IRQ_STATUS_ILLEGAL;
+}
+
+/*
+ * Threaded interrupt handler. On completion, queue the finished
+ * destination buffer id to the IPP event workqueue; on an illegal SFR
+ * access just log an error. The pending bit is acked either way.
+ *
+ * NOTE(review): assumes ippdrv->cmd (and its event_work) are non-NULL
+ * whenever the interrupt is enabled - confirm against the IPP core.
+ */
+static irqreturn_t rotator_irq_handler(int irq, void *arg)
+{
+       struct rot_context *rot = arg;
+       struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
+       enum rot_irq_status irq_status;
+       u32 val;
+
+       /* Get execution result */
+       irq_status = rotator_reg_get_irq_status(rot);
+
+       /* clear status: write the pending bit back to ack the interrupt */
+       val = rot_read(ROT_STATUS);
+       val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
+       rot_write(val, ROT_STATUS);
+
+       if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
+               event_work->ippdrv = ippdrv;
+               event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+                       rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
+               queue_work(ippdrv->event_workq,
+                       (struct work_struct *)event_work);
+       } else
+               DRM_ERROR("the SFR is set illegally\n");
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Round *hsize/*vsize to the alignment required for @fmt and clamp the
+ * result into the per-format [min, max] range from the limit table.
+ */
+static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
+               u32 *vsize)
+{
+       struct rot_limit_table *tbl = rot->limit_tbl;
+       struct rot_limit *lt;
+       u32 mask, aligned;
+
+       /* RGB888 and YCbCr420 2-plane have different limits */
+       lt = (fmt == ROT_CONTROL_FMT_RGB888) ? &tbl->rgb888 :
+                                              &tbl->ycbcr420_2p;
+
+       /* Mask that rounds a value to the nearest aligned one */
+       mask = ~((1 << lt->align) - 1);
+
+       /* Width: round, then clamp into [min_w, max_w] */
+       aligned = ROT_ALIGN(*hsize, lt->align, mask);
+       if (aligned < lt->min_w)
+               aligned = ROT_MIN(lt->min_w, mask);
+       else if (aligned > lt->max_w)
+               aligned = ROT_MAX(lt->max_w, mask);
+       *hsize = aligned;
+
+       /* Height: same treatment with the height limits */
+       aligned = ROT_ALIGN(*vsize, lt->align, mask);
+       if (aligned < lt->min_h)
+               aligned = ROT_MIN(lt->min_h, mask);
+       else if (aligned > lt->max_h)
+               aligned = ROT_MAX(lt->max_h, mask);
+       *vsize = aligned;
+}
+
+/*
+ * Program the source pixel format into ROT_CONTROL.
+ * Only DRM_FORMAT_NV12 and DRM_FORMAT_XRGB8888 are supported;
+ * anything else is rejected with -EINVAL.
+ */
+static int rotator_src_set_fmt(struct device *dev, u32 fmt)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 reg = rot_read(ROT_CONTROL) & ~ROT_CONTROL_FMT_MASK;
+
+       switch (fmt) {
+       case DRM_FORMAT_NV12:
+               reg |= ROT_CONTROL_FMT_YCBCR420_2P;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               reg |= ROT_CONTROL_FMT_RGB888;
+               break;
+       default:
+               DRM_ERROR("invalid image format\n");
+               return -EINVAL;
+       }
+
+       rot_write(reg, ROT_CONTROL);
+
+       return 0;
+}
+
+/* True if @fmt is one of the two register-level formats the H/W handles. */
+static inline bool rotator_check_reg_fmt(u32 fmt)
+{
+       return (fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
+              (fmt == ROT_CONTROL_FMT_RGB888);
+}
+
+/*
+ * Program the source buffer size and crop window.
+ * The buffer size is aligned/clamped to H/W limits first.
+ *
+ * NOTE(review): @swap is accepted but unused here - presumably only the
+ * destination side needs swapped sizes; confirm against the IPP core.
+ */
+static int rotator_src_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos,
+               struct drm_exynos_sz *sz)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 fmt, hsize, vsize;
+       u32 val;
+
+       /* Get format (must already be programmed via set_fmt) */
+       fmt = rotator_reg_get_fmt(rot);
+       if (!rotator_check_reg_fmt(fmt)) {
+               DRM_ERROR("%s:invalid format.\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Align buffer size */
+       hsize = sz->hsize;
+       vsize = sz->vsize;
+       rotator_align_size(rot, fmt, &hsize, &vsize);
+
+       /* Set buffer size configuration */
+       val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+       rot_write(val, ROT_SRC_BUF_SIZE);
+
+       /* Set crop image position configuration */
+       val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+       rot_write(val, ROT_SRC_CROP_POS);
+       val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
+       rot_write(val, ROT_SRC_CROP_SIZE);
+
+       return 0;
+}
+
+/*
+ * Latch source buffer addresses for @buf_id.
+ * On enqueue the per-plane addresses are programmed (deriving the CB
+ * plane for NV12 when only the Y plane was supplied); on dequeue they
+ * are cleared to 0.
+ */
+static int rotator_src_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info,
+               u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+       u32 val, fmt, hsize, vsize;
+       int i;
+
+       /* Set current buf_id */
+       rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               /* Set address configuration */
+               for_each_ipp_planar(i)
+                       addr[i] = buf_info->base[i];
+
+               /* Get format */
+               fmt = rotator_reg_get_fmt(rot);
+               if (!rotator_check_reg_fmt(fmt)) {
+                       DRM_ERROR("%s:invalid format.\n", __func__);
+                       return -EINVAL;
+               }
+
+               /* Re-set cb planar for NV12 format */
+               if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+                   !addr[EXYNOS_DRM_PLANAR_CB]) {
+
+                       val = rot_read(ROT_SRC_BUF_SIZE);
+                       hsize = ROT_GET_BUF_SIZE_W(val);
+                       vsize = ROT_GET_BUF_SIZE_H(val);
+
+                       /* CB plane follows the Y plane in memory */
+                       addr[EXYNOS_DRM_PLANAR_CB] =
+                               addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+               }
+
+               for_each_ipp_planar(i)
+                       rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
+               break;
+       case IPP_BUF_DEQUEUE:
+               for_each_ipp_planar(i)
+                       rot_write(0x0, ROT_SRC_BUF_ADDR(i));
+               break;
+       default:
+               /* Nothing to do */
+               break;
+       }
+
+       return 0;
+}
+
+/*
+ * Program flip and rotation on the destination path and report via
+ * @swap whether the destination width/height must be exchanged
+ * (true for 90/270 degree rotation).
+ */
+static int rotator_dst_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 ctrl = rot_read(ROT_CONTROL);
+
+       /* Flip configuration */
+       ctrl &= ~ROT_CONTROL_FLIP_MASK;
+       switch (flip) {
+       case EXYNOS_DRM_FLIP_VERTICAL:
+               ctrl |= ROT_CONTROL_FLIP_VERTICAL;
+               break;
+       case EXYNOS_DRM_FLIP_HORIZONTAL:
+               ctrl |= ROT_CONTROL_FLIP_HORIZONTAL;
+               break;
+       default:
+               /* no flip: leave the flip bits cleared */
+               break;
+       }
+
+       /* Rotation configuration */
+       ctrl &= ~ROT_CONTROL_ROT_MASK;
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_90:
+               ctrl |= ROT_CONTROL_ROT_90;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               ctrl |= ROT_CONTROL_ROT_180;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               ctrl |= ROT_CONTROL_ROT_270;
+               break;
+       default:
+               /* 0 degrees: leave the rotation bits cleared */
+               break;
+       }
+
+       rot_write(ctrl, ROT_CONTROL);
+
+       /* 90/270 degree rotation exchanges buffer width and height */
+       *swap = (degree == EXYNOS_DRM_DEGREE_90) ||
+               (degree == EXYNOS_DRM_DEGREE_270);
+
+       return 0;
+}
+
+/*
+ * Program the destination buffer size and crop position.
+ * Mirrors rotator_src_set_size() but has no crop-size register.
+ *
+ * NOTE(review): @swap is unused here as well - confirm whether the IPP
+ * core pre-swaps the sizes before calling in.
+ */
+static int rotator_dst_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos,
+               struct drm_exynos_sz *sz)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 val, fmt, hsize, vsize;
+
+       /* Get format */
+       fmt = rotator_reg_get_fmt(rot);
+       if (!rotator_check_reg_fmt(fmt)) {
+               DRM_ERROR("%s:invalid format.\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Align buffer size */
+       hsize = sz->hsize;
+       vsize = sz->vsize;
+       rotator_align_size(rot, fmt, &hsize, &vsize);
+
+       /* Set buffer size configuration */
+       val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+       rot_write(val, ROT_DST_BUF_SIZE);
+
+       /* Set crop image position configuration */
+       val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+       rot_write(val, ROT_DST_CROP_POS);
+
+       return 0;
+}
+
+/*
+ * Latch destination buffer addresses for @buf_id.
+ * Same enqueue/dequeue handling as rotator_src_set_addr(), including
+ * the NV12 CB-plane derivation, but against the DST registers.
+ */
+static int rotator_dst_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info,
+               u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+       u32 val, fmt, hsize, vsize;
+       int i;
+
+       /* Set current buf_id */
+       rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               /* Set address configuration */
+               for_each_ipp_planar(i)
+                       addr[i] = buf_info->base[i];
+
+               /* Get format */
+               fmt = rotator_reg_get_fmt(rot);
+               if (!rotator_check_reg_fmt(fmt)) {
+                       DRM_ERROR("%s:invalid format.\n", __func__);
+                       return -EINVAL;
+               }
+
+               /* Re-set cb planar for NV12 format */
+               if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+                   !addr[EXYNOS_DRM_PLANAR_CB]) {
+                       /* Get buf size */
+                       val = rot_read(ROT_DST_BUF_SIZE);
+
+                       hsize = ROT_GET_BUF_SIZE_W(val);
+                       vsize = ROT_GET_BUF_SIZE_H(val);
+
+                       /* CB plane follows the Y plane in memory */
+                       addr[EXYNOS_DRM_PLANAR_CB] =
+                               addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+               }
+
+               for_each_ipp_planar(i)
+                       rot_write(addr[i], ROT_DST_BUF_ADDR(i));
+               break;
+       case IPP_BUF_DEQUEUE:
+               for_each_ipp_planar(i)
+                       rot_write(0x0, ROT_DST_BUF_ADDR(i));
+               break;
+       default:
+               /* Nothing to do */
+               break;
+       }
+
+       return 0;
+}
+
+/* Source-side (input DMA) callbacks handed to the IPP core */
+static struct exynos_drm_ipp_ops rot_src_ops = {
+       .set_fmt        =       rotator_src_set_fmt,
+       .set_size       =       rotator_src_set_size,
+       .set_addr       =       rotator_src_set_addr,
+};
+
+/* Destination-side (output DMA) callbacks; rotation/flip live here */
+static struct exynos_drm_ipp_ops rot_dst_ops = {
+       .set_transf     =       rotator_dst_set_transf,
+       .set_size       =       rotator_dst_set_size,
+       .set_addr       =       rotator_dst_set_addr,
+};
+
+/*
+ * Allocate and fill the capability list advertised to the IPP core:
+ * vertical/horizontal flip and 0/90/180/270 degree rotation; no
+ * csc, crop or scale support.
+ */
+static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+       struct drm_exynos_ipp_prop_list *caps;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       caps = devm_kzalloc(ippdrv->dev, sizeof(*caps), GFP_KERNEL);
+       if (!caps) {
+               DRM_ERROR("failed to alloc property list.\n");
+               return -ENOMEM;
+       }
+
+       caps->version = 1;
+       caps->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+                    (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+       caps->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+                      (1 << EXYNOS_DRM_DEGREE_90) |
+                      (1 << EXYNOS_DRM_DEGREE_180) |
+                      (1 << EXYNOS_DRM_DEGREE_270);
+       caps->csc = 0;
+       caps->crop = 0;
+       caps->scale = 0;
+
+       ippdrv->prop_list = caps;
+
+       return 0;
+}
+
+/* True if @fmt is a DRM fourcc the rotator accepts (XRGB8888 or NV12). */
+static inline bool rotator_check_drm_fmt(u32 fmt)
+{
+       if (fmt == DRM_FORMAT_XRGB8888 || fmt == DRM_FORMAT_NV12)
+               return true;
+
+       DRM_DEBUG_KMS("%s:not support format\n", __func__);
+       return false;
+}
+
+/* True if @flip is a flip mode the rotator supports. */
+static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
+{
+       if (flip == EXYNOS_DRM_FLIP_NONE ||
+           flip == EXYNOS_DRM_FLIP_VERTICAL ||
+           flip == EXYNOS_DRM_FLIP_HORIZONTAL)
+               return true;
+
+       DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+       return false;
+}
+
+/*
+ * Validate an IPP property request against rotator capabilities:
+ * identical src/dst format (no csc), rotation/flip only on the
+ * destination side, positions inside the buffers, and matching
+ * (possibly swapped) src/dst crop sizes (no scaling).
+ */
+static int rotator_ippdrv_check_property(struct device *dev,
+               struct drm_exynos_ipp_property *property)
+{
+       struct drm_exynos_ipp_config *src_config =
+                                       &property->config[EXYNOS_DRM_OPS_SRC];
+       struct drm_exynos_ipp_config *dst_config =
+                                       &property->config[EXYNOS_DRM_OPS_DST];
+       struct drm_exynos_pos *src_pos = &src_config->pos;
+       struct drm_exynos_pos *dst_pos = &dst_config->pos;
+       struct drm_exynos_sz *src_sz = &src_config->sz;
+       struct drm_exynos_sz *dst_sz = &dst_config->sz;
+       bool swap = false;
+
+       /* Check format configuration */
+       if (src_config->fmt != dst_config->fmt) {
+               DRM_DEBUG_KMS("%s:not support csc feature\n", __func__);
+               return -EINVAL;
+       }
+
+       if (!rotator_check_drm_fmt(dst_config->fmt)) {
+               DRM_DEBUG_KMS("%s:invalid format\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Check transform configuration */
+       if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
+               DRM_DEBUG_KMS("%s:not support source-side rotation\n",
+                       __func__);
+               return -EINVAL;
+       }
+
+       switch (dst_config->degree) {
+       case EXYNOS_DRM_DEGREE_90:
+       case EXYNOS_DRM_DEGREE_270:
+               swap = true;
+               /* fall through - 90/270 are valid degrees too */
+       case EXYNOS_DRM_DEGREE_0:
+       case EXYNOS_DRM_DEGREE_180:
+               /* No problem */
+               break;
+       default:
+               DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
+               return -EINVAL;
+       }
+
+       if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
+               DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__);
+               return -EINVAL;
+       }
+
+       if (!rotator_check_drm_flip(dst_config->flip)) {
+               DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Check size configuration */
+       if ((src_pos->x + src_pos->w > src_sz->hsize) ||
+               (src_pos->y + src_pos->h > src_sz->vsize)) {
+               DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
+               return -EINVAL;
+       }
+
+       /* With 90/270 rotation the dst w/h are exchanged vs. the src */
+       if (swap) {
+               if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
+                       (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
+                       DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+                               __func__);
+                       return -EINVAL;
+               }
+
+               if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
+                       DRM_DEBUG_KMS("%s:not support scale feature\n",
+                               __func__);
+                       return -EINVAL;
+               }
+       } else {
+               if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
+                       (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
+                       DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+                               __func__);
+                       return -EINVAL;
+               }
+
+               if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
+                       DRM_DEBUG_KMS("%s:not support scale feature\n",
+                               __func__);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Kick off a rotation operation. Only memory-to-memory (M2M) commands
+ * are supported, and the device must not be suspended.
+ */
+static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+
+       if (rot->suspended) {
+               DRM_ERROR("suspended state\n");
+               return -EPERM;
+       }
+
+       if (cmd != IPP_CMD_M2M) {
+               DRM_ERROR("not support cmd: %d\n", cmd);
+               return -EINVAL;
+       }
+
+       /* Unmask the completion/illegal interrupt before starting */
+       rotator_reg_set_irq(rot, true);
+
+       /* Setting the START bit triggers the H/W */
+       rot_write(rot_read(ROT_CONTROL) | ROT_CONTROL_START, ROT_CONTROL);
+
+       return 0;
+}
+
+/*
+ * Bind the rotator H/W: map registers, request the threaded IRQ, grab
+ * the gate clock, fill in the ippdrv callbacks and register with the
+ * exynos IPP core.
+ *
+ * NOTE(review): rot/regs are devm-managed, so the devm_iounmap and
+ * devm_kfree calls in the error path are redundant (but harmless);
+ * they could be dropped together with their labels.
+ */
+static int __devinit rotator_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct rot_context *rot;
+       struct exynos_drm_ippdrv *ippdrv;
+       int ret;
+
+       rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
+       if (!rot) {
+               dev_err(dev, "failed to allocate rot\n");
+               return -ENOMEM;
+       }
+
+       rot->limit_tbl = (struct rot_limit_table *)
+                               platform_get_device_id(pdev)->driver_data;
+
+       rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!rot->regs_res) {
+               dev_err(dev, "failed to find registers\n");
+               ret = -ENOENT;
+               goto err_get_resource;
+       }
+
+       rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
+       if (!rot->regs) {
+               dev_err(dev, "failed to map register\n");
+               ret = -ENXIO;
+               goto err_get_resource;
+       }
+
+       rot->irq = platform_get_irq(pdev, 0);
+       if (rot->irq < 0) {
+               dev_err(dev, "failed to get irq\n");
+               ret = rot->irq;
+               goto err_get_irq;
+       }
+
+       ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
+                       IRQF_ONESHOT, "drm_rotator", rot);
+       if (ret < 0) {
+               dev_err(dev, "failed to request irq\n");
+               goto err_get_irq;
+       }
+
+       rot->clock = clk_get(dev, "rotator");
+       if (IS_ERR_OR_NULL(rot->clock)) {
+               dev_err(dev, "failed to get clock\n");
+               /*
+                * PTR_ERR(NULL) is 0, which would make probe() report
+                * success without a clock; map a NULL clock to -ENODEV.
+                */
+               ret = rot->clock ? PTR_ERR(rot->clock) : -ENODEV;
+               goto err_clk_get;
+       }
+
+       pm_runtime_enable(dev);
+
+       /* Hook this driver's callbacks into the IPP driver descriptor */
+       ippdrv = &rot->ippdrv;
+       ippdrv->dev = dev;
+       ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+       ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+       ippdrv->check_property = rotator_ippdrv_check_property;
+       ippdrv->start = rotator_ippdrv_start;
+       ret = rotator_init_prop_list(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to init property list.\n");
+               goto err_ippdrv_register;
+       }
+
+       DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv);
+
+       platform_set_drvdata(pdev, rot);
+
+       ret = exynos_drm_ippdrv_register(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to register drm rotator device\n");
+               goto err_ippdrv_register;
+       }
+
+       dev_info(dev, "The exynos rotator is probed successfully\n");
+
+       return 0;
+
+err_ippdrv_register:
+       devm_kfree(dev, ippdrv->prop_list);
+       pm_runtime_disable(dev);
+       clk_put(rot->clock);
+err_clk_get:
+       free_irq(rot->irq, rot);
+err_get_irq:
+       devm_iounmap(dev, rot->regs);
+err_get_resource:
+       devm_kfree(dev, rot);
+       return ret;
+}
+
+/*
+ * Unbind: unregister from the IPP core and release clock, IRQ and
+ * devm-managed resources.
+ *
+ * NOTE(review): prop_list is freed *before* the unregister call -
+ * confirm the IPP core never touches prop_list during unregister,
+ * otherwise these two lines should be swapped.
+ */
+static int __devexit rotator_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct rot_context *rot = dev_get_drvdata(dev);
+       struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+
+       devm_kfree(dev, ippdrv->prop_list);
+       exynos_drm_ippdrv_unregister(ippdrv);
+
+       pm_runtime_disable(dev);
+       clk_put(rot->clock);
+
+       free_irq(rot->irq, rot);
+       devm_iounmap(dev, rot->regs);
+
+       devm_kfree(dev, rot);
+
+       return 0;
+}
+
+/*
+ * Size limits for the "exynos-rot" variant, referenced through the
+ * platform driver_data below.
+ * NOTE(review): only referenced in this file - presumably could be
+ * static; confirm no external users before changing linkage.
+ */
+struct rot_limit_table rot_limit_tbl = {
+       .ycbcr420_2p = {
+               .min_w = 32,
+               .min_h = 32,
+               .max_w = SZ_32K,
+               .max_h = SZ_32K,
+               .align = 3,
+       },
+       .rgb888 = {
+               .min_w = 8,
+               .min_h = 8,
+               .max_w = SZ_8K,
+               .max_h = SZ_8K,
+               .align = 2,
+       },
+};
+
+/* Supported device variants; driver_data carries the size-limit table */
+struct platform_device_id rotator_driver_ids[] = {
+       {
+               .name           = "exynos-rot",
+               .driver_data    = (unsigned long)&rot_limit_tbl,
+       },
+       {},
+};
+
+/* Gate/ungate the rotator clock and track the suspended state. */
+static int rotator_clk_crtl(struct rot_context *rot, bool enable)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (enable)
+               clk_enable(rot->clock);
+       else
+               clk_disable(rot->clock);
+
+       rot->suspended = !enable;
+
+       return 0;
+}
+
+
+#ifdef CONFIG_PM_SLEEP
+/* System sleep: gate the clock unless runtime PM already did. */
+static int rotator_suspend(struct device *dev)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return pm_runtime_suspended(dev) ? 0 : rotator_clk_crtl(rot, false);
+}
+
+/* System resume: ungate the clock unless runtime PM owns the device. */
+static int rotator_resume(struct device *dev)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return pm_runtime_suspended(dev) ? 0 : rotator_clk_crtl(rot, true);
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime PM: gate the clock while the device is idle. */
+static int rotator_runtime_suspend(struct device *dev)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return  rotator_clk_crtl(rot, false);
+}
+
+/* Runtime PM: ungate the clock before use. */
+static int rotator_runtime_resume(struct device *dev)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return  rotator_clk_crtl(rot, true);
+}
+#endif
+
+/* System-sleep and runtime PM hooks (both just gate the clock) */
+static const struct dev_pm_ops rotator_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+       SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
+                                                                       NULL)
+};
+
+/*
+ * Platform driver descriptor.
+ * NOTE(review): deliberately non-static - presumably registered by the
+ * exynos drm core from another file; confirm before changing linkage.
+ */
+struct platform_driver rotator_driver = {
+       .probe          = rotator_probe,
+       .remove         = __devexit_p(rotator_remove),
+       .id_table       = rotator_driver_ids,
+       .driver         = {
+               .name   = "exynos-rot",
+               .owner  = THIS_MODULE,
+               .pm     = &rotator_pm_ops,
+       },
+};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644 (file)
index 0000000..a2d7a14
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     YoungJun Cho <yj44.cho@samsung.com>
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef        _EXYNOS_DRM_ROTATOR_H_
+#define        _EXYNOS_DRM_ROTATOR_H_
+
+/* TODO */
+
+#endif
index e4b8a8f..99bfc38 100644 (file)
@@ -39,7 +39,6 @@ struct vidi_win_data {
        unsigned int            fb_height;
        unsigned int            bpp;
        dma_addr_t              dma_addr;
-       void __iomem            *vaddr;
        unsigned int            buf_offsize;
        unsigned int            line_size;      /* bytes */
        bool                    enabled;
@@ -294,7 +293,6 @@ static void vidi_win_mode_set(struct device *dev,
        win_data->fb_width = overlay->fb_width;
        win_data->fb_height = overlay->fb_height;
        win_data->dma_addr = overlay->dma_addr[0] + offset;
-       win_data->vaddr = overlay->vaddr[0] + offset;
        win_data->bpp = overlay->bpp;
        win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
                                (overlay->bpp >> 3);
@@ -309,9 +307,7 @@ static void vidi_win_mode_set(struct device *dev,
                        win_data->offset_x, win_data->offset_y);
        DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
                        win_data->ovl_width, win_data->ovl_height);
-       DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
-                       (unsigned long)win_data->dma_addr,
-                       (unsigned long)win_data->vaddr);
+       DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
        DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
                        overlay->fb_width, overlay->crtc_width);
 }
@@ -382,7 +378,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
        struct drm_pending_vblank_event *e, *t;
        struct timeval now;
        unsigned long flags;
-       bool is_checked = false;
 
        spin_lock_irqsave(&drm_dev->event_lock, flags);
 
@@ -392,8 +387,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
                if (crtc != e->pipe)
                        continue;
 
-               is_checked = true;
-
                do_gettimeofday(&now);
                e->event.sequence = 0;
                e->event.tv_sec = now.tv_sec;
@@ -401,22 +394,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
 
                list_move_tail(&e->base.link, &e->base.file_priv->event_list);
                wake_up_interruptible(&e->base.file_priv->event_wait);
-       }
-
-       if (is_checked) {
-               /*
-                * call drm_vblank_put only in case that drm_vblank_get was
-                * called.
-                */
-               if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-                       drm_vblank_put(drm_dev, crtc);
-
-               /*
-                * don't off vblank if vblank_disable_allowed is 1,
-                * because vblank would be off by timer handler.
-                */
-               if (!drm_dev->vblank_disable_allowed)
-                       drm_vblank_off(drm_dev, crtc);
+               drm_vblank_put(drm_dev, crtc);
        }
 
        spin_unlock_irqrestore(&drm_dev->event_lock, flags);
index 2c115f8..2c46b6c 100644 (file)
 #define MAX_HEIGHT             1080
 #define get_hdmi_context(dev)  platform_get_drvdata(to_platform_device(dev))
 
+/* AVI header and aspect ratio */
+#define HDMI_AVI_VERSION               0x02
+#define HDMI_AVI_LENGTH                0x0D
+#define AVI_PIC_ASPECT_RATIO_16_9      (2 << 4)
+#define AVI_SAME_AS_PIC_ASPECT_RATIO   8
+
+/* AUI header info */
+#define HDMI_AUI_VERSION       0x01
+#define HDMI_AUI_LENGTH        0x0A
+
+/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
+enum HDMI_PACKET_TYPE {
+       /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
+       /* InfoFrame packet type */
+       HDMI_PACKET_TYPE_INFOFRAME = 0x80,
+       /* Vendor-Specific InfoFrame */
+       HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
+       /* Auxiliary Video information InfoFrame */
+       HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
+       /* Audio information InfoFrame */
+       HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
+};
+
 enum hdmi_type {
        HDMI_TYPE13,
        HDMI_TYPE14,
@@ -74,6 +97,7 @@ struct hdmi_context {
        struct mutex                    hdmi_mutex;
 
        void __iomem                    *regs;
+       void                            *parent_ctx;
        int                             external_irq;
        int                             internal_irq;
 
@@ -84,7 +108,6 @@ struct hdmi_context {
        int cur_conf;
 
        struct hdmi_resources           res;
-       void                            *parent_ctx;
 
        int                             hpd_gpio;
 
@@ -182,6 +205,7 @@ struct hdmi_v13_conf {
        int height;
        int vrefresh;
        bool interlace;
+       int cea_video_id;
        const u8 *hdmiphy_data;
        const struct hdmi_v13_preset_conf *conf;
 };
@@ -353,15 +377,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
 };
 
 static const struct hdmi_v13_conf hdmi_v13_confs[] = {
-       { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
-       { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
-       { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
-       { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
-       { 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
-                                &hdmi_v13_conf_1080p50 },
-       { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
-       { 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
-                                &hdmi_v13_conf_1080p60 },
+       { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
+                       &hdmi_v13_conf_720p60 },
+       { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
+                       &hdmi_v13_conf_720p60 },
+       { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
+                       &hdmi_v13_conf_480p },
+       { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
+                       &hdmi_v13_conf_1080i50 },
+       { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
+                       &hdmi_v13_conf_1080p50 },
+       { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
+                       &hdmi_v13_conf_1080i60 },
+       { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
+                       &hdmi_v13_conf_1080p60 },
 };
 
 /* HDMI Version 1.4 */
@@ -479,6 +508,7 @@ struct hdmi_conf {
        int height;
        int vrefresh;
        bool interlace;
+       int cea_video_id;
        const u8 *hdmiphy_data;
        const struct hdmi_preset_conf *conf;
 };
@@ -934,16 +964,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
 };
 
 static const struct hdmi_conf hdmi_confs[] = {
-       { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
-       { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
-       { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
-       { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
-       { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
-       { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
-       { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
-       { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+       { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
+       { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
+       { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+       { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+       { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+       { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
+       { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+       { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
 };
 
+struct hdmi_infoframe {
+       enum HDMI_PACKET_TYPE type;
+       u8 ver;
+       u8 len;
+};
 
 static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
 {
@@ -1267,6 +1302,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
        return hdmi_v14_conf_index(mode);
 }
 
+static u8 hdmi_chksum(struct hdmi_context *hdata,
+                       u32 start, u8 len, u32 hdr_sum)
+{
+       int i;
+
+       /* hdr_sum : header0 + header1 + header2
+       * start : start address of packet byte1
+       * len : packet bytes - 1 */
+       for (i = 0; i < len; ++i)
+               hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
+
+       /* return 2's complement of 8 bit hdr_sum */
+       return (u8)(~(hdr_sum & 0xff) + 1);
+}
+
+static void hdmi_reg_infoframe(struct hdmi_context *hdata,
+                       struct hdmi_infoframe *infoframe)
+{
+       u32 hdr_sum;
+       u8 chksum;
+       u32 aspect_ratio;
+       u32 mod;
+       u32 vic;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
+       if (hdata->dvi_mode) {
+               hdmi_reg_writeb(hdata, HDMI_VSI_CON,
+                               HDMI_VSI_CON_DO_NOT_TRANSMIT);
+               hdmi_reg_writeb(hdata, HDMI_AVI_CON,
+                               HDMI_AVI_CON_DO_NOT_TRANSMIT);
+               hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
+               return;
+       }
+
+       switch (infoframe->type) {
+       case HDMI_PACKET_TYPE_AVI:
+               hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
+               hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
+               hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
+               hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
+               hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+
+               /* Output format zero hardcoded ,RGB YBCR selection */
+               hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
+                       AVI_ACTIVE_FORMAT_VALID |
+                       AVI_UNDERSCANNED_DISPLAY_VALID);
+
+               aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
+
+               hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
+                               AVI_SAME_AS_PIC_ASPECT_RATIO);
+
+               if (hdata->type == HDMI_TYPE13)
+                       vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
+               else
+                       vic = hdmi_confs[hdata->cur_conf].cea_video_id;
+
+               hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+
+               chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
+                                       infoframe->len, hdr_sum);
+               DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
+               hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
+               break;
+       case HDMI_PACKET_TYPE_AUI:
+               hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+               hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
+               hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
+               hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
+               hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+               chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
+                                       infoframe->len, hdr_sum);
+               DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
+               hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
+               break;
+       default:
+               break;
+       }
+}
+
 static bool hdmi_is_connected(void *ctx)
 {
        struct hdmi_context *hdata = ctx;
@@ -1293,6 +1410,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
                DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
                        (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
                        raw_edid->width_cm, raw_edid->height_cm);
+               kfree(raw_edid);
        } else {
                return -ENODEV;
        }
@@ -1541,6 +1659,8 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
 
 static void hdmi_conf_init(struct hdmi_context *hdata)
 {
+       struct hdmi_infoframe infoframe;
+
        /* disable HPD interrupts */
        hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
                HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1695,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
                hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
                hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
        } else {
+               infoframe.type = HDMI_PACKET_TYPE_AVI;
+               infoframe.ver = HDMI_AVI_VERSION;
+               infoframe.len = HDMI_AVI_LENGTH;
+               hdmi_reg_infoframe(hdata, &infoframe);
+
+               infoframe.type = HDMI_PACKET_TYPE_AUI;
+               infoframe.ver = HDMI_AUI_VERSION;
+               infoframe.len = HDMI_AUI_LENGTH;
+               hdmi_reg_infoframe(hdata, &infoframe);
+
                /* enable AVI packet every vsync, fixes purple line problem */
-               hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
-               hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
                hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
        }
 }
@@ -1875,6 +2003,24 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
        mdelay(10);
 }
 
+static void hdmiphy_poweron(struct hdmi_context *hdata)
+{
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (hdata->type == HDMI_TYPE14)
+               hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
+                       HDMI_PHY_POWER_OFF_EN);
+}
+
+static void hdmiphy_poweroff(struct hdmi_context *hdata)
+{
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (hdata->type == HDMI_TYPE14)
+               hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
+                       HDMI_PHY_POWER_OFF_EN);
+}
+
 static void hdmiphy_conf_apply(struct hdmi_context *hdata)
 {
        const u8 *hdmiphy_data;
@@ -1978,9 +2124,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
                        index = hdmi_v14_conf_index(m);
 
                if (index >= 0) {
+                       struct drm_mode_object base;
+                       struct list_head head;
+
                        DRM_INFO("desired mode doesn't exist so\n");
                        DRM_INFO("use the most suitable mode among modes.\n");
+
+                       /* preserve display mode header while copying. */
+                       head = adjusted_mode->head;
+                       base = adjusted_mode->base;
                        memcpy(adjusted_mode, m, sizeof(*m));
+                       adjusted_mode->head = head;
+                       adjusted_mode->base = base;
                        break;
                }
        }
@@ -2034,12 +2189,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
 
        mutex_unlock(&hdata->hdmi_mutex);
 
-       pm_runtime_get_sync(hdata->dev);
-
        regulator_bulk_enable(res->regul_count, res->regul_bulk);
        clk_enable(res->hdmiphy);
        clk_enable(res->hdmi);
        clk_enable(res->sclk_hdmi);
+
+       hdmiphy_poweron(hdata);
 }
 
 static void hdmi_poweroff(struct hdmi_context *hdata)
@@ -2058,14 +2213,13 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
         * its reset state seems to meet the condition.
         */
        hdmiphy_conf_reset(hdata);
+       hdmiphy_poweroff(hdata);
 
        clk_disable(res->sclk_hdmi);
        clk_disable(res->hdmi);
        clk_disable(res->hdmiphy);
        regulator_bulk_disable(res->regul_count, res->regul_bulk);
 
-       pm_runtime_put_sync(hdata->dev);
-
        mutex_lock(&hdata->hdmi_mutex);
 
        hdata->powered = false;
@@ -2078,16 +2232,18 @@ static void hdmi_dpms(void *ctx, int mode)
 {
        struct hdmi_context *hdata = ctx;
 
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+       DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
-               hdmi_poweron(hdata);
+               if (pm_runtime_suspended(hdata->dev))
+                       pm_runtime_get_sync(hdata->dev);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               hdmi_poweroff(hdata);
+               if (!pm_runtime_suspended(hdata->dev))
+                       pm_runtime_put_sync(hdata->dev);
                break;
        default:
                DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -2166,27 +2322,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
        memset(res, 0, sizeof(*res));
 
        /* get clocks, power */
-       res->hdmi = clk_get(dev, "hdmi");
+       res->hdmi = devm_clk_get(dev, "hdmi");
        if (IS_ERR_OR_NULL(res->hdmi)) {
                DRM_ERROR("failed to get clock 'hdmi'\n");
                goto fail;
        }
-       res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+       res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
        if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
                DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
                goto fail;
        }
-       res->sclk_pixel = clk_get(dev, "sclk_pixel");
+       res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
        if (IS_ERR_OR_NULL(res->sclk_pixel)) {
                DRM_ERROR("failed to get clock 'sclk_pixel'\n");
                goto fail;
        }
-       res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+       res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
        if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
                DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
                goto fail;
        }
-       res->hdmiphy = clk_get(dev, "hdmiphy");
+       res->hdmiphy = devm_clk_get(dev, "hdmiphy");
        if (IS_ERR_OR_NULL(res->hdmiphy)) {
                DRM_ERROR("failed to get clock 'hdmiphy'\n");
                goto fail;
@@ -2194,7 +2350,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
 
        clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
 
-       res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+       res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
                sizeof(res->regul_bulk[0]), GFP_KERNEL);
        if (!res->regul_bulk) {
                DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2360,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
                res->regul_bulk[i].supply = supply[i];
                res->regul_bulk[i].consumer = NULL;
        }
-       ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
        if (ret) {
                DRM_ERROR("failed to get regulators\n");
                goto fail;
@@ -2217,28 +2373,6 @@ fail:
        return -ENODEV;
 }
 
-static int hdmi_resources_cleanup(struct hdmi_context *hdata)
-{
-       struct hdmi_resources *res = &hdata->res;
-
-       regulator_bulk_free(res->regul_count, res->regul_bulk);
-       /* kfree is NULL-safe */
-       kfree(res->regul_bulk);
-       if (!IS_ERR_OR_NULL(res->hdmiphy))
-               clk_put(res->hdmiphy);
-       if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
-               clk_put(res->sclk_hdmiphy);
-       if (!IS_ERR_OR_NULL(res->sclk_pixel))
-               clk_put(res->sclk_pixel);
-       if (!IS_ERR_OR_NULL(res->sclk_hdmi))
-               clk_put(res->sclk_hdmi);
-       if (!IS_ERR_OR_NULL(res->hdmi))
-               clk_put(res->hdmi);
-       memset(res, 0, sizeof(*res));
-
-       return 0;
-}
-
 static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
 
 void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2306,6 +2440,7 @@ static struct platform_device_id hdmi_driver_types[] = {
        }
 };
 
+#ifdef CONFIG_OF
 static struct of_device_id hdmi_match_types[] = {
        {
                .compatible = "samsung,exynos5-hdmi",
@@ -2314,6 +2449,7 @@ static struct of_device_id hdmi_match_types[] = {
                /* end node */
        }
 };
+#endif
 
 static int __devinit hdmi_probe(struct platform_device *pdev)
 {
@@ -2366,6 +2502,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
                const struct of_device_id *match;
                match = of_match_node(of_match_ptr(hdmi_match_types),
                                        pdev->dev.of_node);
+               if (match == NULL)
+                       return -ENODEV;
                hdata->type = (enum hdmi_type)match->data;
        } else {
                hdata->type = (enum hdmi_type)platform_get_device_id
@@ -2378,36 +2516,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
        ret = hdmi_resources_init(hdata);
 
        if (ret) {
-               ret = -EINVAL;
                DRM_ERROR("hdmi_resources_init failed\n");
-               goto err_data;
+               return -EINVAL;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                DRM_ERROR("failed to find registers\n");
-               ret = -ENOENT;
-               goto err_resource;
+               return -ENOENT;
        }
 
        hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
        if (!hdata->regs) {
                DRM_ERROR("failed to map registers\n");
-               ret = -ENXIO;
-               goto err_resource;
+               return -ENXIO;
        }
 
-       ret = gpio_request(hdata->hpd_gpio, "HPD");
+       ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
        if (ret) {
                DRM_ERROR("failed to request HPD gpio\n");
-               goto err_resource;
+               return ret;
        }
 
        /* DDC i2c driver */
        if (i2c_add_driver(&ddc_driver)) {
                DRM_ERROR("failed to register ddc i2c driver\n");
-               ret = -ENOENT;
-               goto err_gpio;
+               return -ENOENT;
        }
 
        hdata->ddc_port = hdmi_ddc;
@@ -2470,11 +2604,6 @@ err_hdmiphy:
        i2c_del_driver(&hdmiphy_driver);
 err_ddc:
        i2c_del_driver(&ddc_driver);
-err_gpio:
-       gpio_free(hdata->hpd_gpio);
-err_resource:
-       hdmi_resources_cleanup(hdata);
-err_data:
        return ret;
 }
 
@@ -2491,9 +2620,6 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
        free_irq(hdata->internal_irq, hdata);
        free_irq(hdata->external_irq, hdata);
 
-       gpio_free(hdata->hpd_gpio);
-
-       hdmi_resources_cleanup(hdata);
 
        /* hdmiphy i2c driver */
        i2c_del_driver(&hdmiphy_driver);
@@ -2509,6 +2635,8 @@ static int hdmi_suspend(struct device *dev)
        struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
        struct hdmi_context *hdata = ctx->ctx;
 
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
        disable_irq(hdata->internal_irq);
        disable_irq(hdata->external_irq);
 
@@ -2516,6 +2644,11 @@ static int hdmi_suspend(struct device *dev)
        if (ctx->drm_dev)
                drm_helper_hpd_irq_event(ctx->drm_dev);
 
+       if (pm_runtime_suspended(dev)) {
+               DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+               return 0;
+       }
+
        hdmi_poweroff(hdata);
 
        return 0;
@@ -2526,13 +2659,52 @@ static int hdmi_resume(struct device *dev)
        struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
        struct hdmi_context *hdata = ctx->ctx;
 
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
        enable_irq(hdata->external_irq);
        enable_irq(hdata->internal_irq);
+
+       if (!pm_runtime_suspended(dev)) {
+               DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+               return 0;
+       }
+
+       hdmi_poweron(hdata);
+
        return 0;
 }
 #endif
 
-static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
+#ifdef CONFIG_PM_RUNTIME
+static int hdmi_runtime_suspend(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+       struct hdmi_context *hdata = ctx->ctx;
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       hdmi_poweroff(hdata);
+
+       return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+       struct hdmi_context *hdata = ctx->ctx;
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       hdmi_poweron(hdata);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
+       SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
+};
 
 struct platform_driver hdmi_driver = {
        .probe          = hdmi_probe,
@@ -2542,6 +2714,6 @@ struct platform_driver hdmi_driver = {
                .name   = "exynos-hdmi",
                .owner  = THIS_MODULE,
                .pm     = &hdmi_pm_ops,
-               .of_match_table = hdmi_match_types,
+               .of_match_table = of_match_ptr(hdmi_match_types),
        },
 };
index 27d1720..6206056 100644 (file)
@@ -46,6 +46,7 @@ static const struct i2c_device_id hdmiphy_id[] = {
        { },
 };
 
+#ifdef CONFIG_OF
 static struct of_device_id hdmiphy_match_types[] = {
        {
                .compatible = "samsung,exynos5-hdmiphy",
@@ -53,12 +54,13 @@ static struct of_device_id hdmiphy_match_types[] = {
                /* end node */
        }
 };
+#endif
 
 struct i2c_driver hdmiphy_driver = {
        .driver = {
                .name   = "exynos-hdmiphy",
                .owner  = THIS_MODULE,
-               .of_match_table = hdmiphy_match_types,
+               .of_match_table = of_match_ptr(hdmiphy_match_types),
        },
        .id_table = hdmiphy_id,
        .probe          = hdmiphy_probe,
index e7fbb82..21db895 100644 (file)
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_hdmi.h"
+#include "exynos_drm_iommu.h"
 
 #define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
 
 struct hdmi_win_data {
        dma_addr_t              dma_addr;
-       void __iomem            *vaddr;
        dma_addr_t              chroma_dma_addr;
-       void __iomem            *chroma_vaddr;
        uint32_t                pixel_format;
        unsigned int            bpp;
        unsigned int            crtc_x;
@@ -59,6 +58,8 @@ struct hdmi_win_data {
        unsigned int            mode_width;
        unsigned int            mode_height;
        unsigned int            scan_flags;
+       bool                    enabled;
+       bool                    resume;
 };
 
 struct mixer_resources {
@@ -80,6 +81,7 @@ enum mixer_version_id {
 
 struct mixer_context {
        struct device           *dev;
+       struct drm_device       *drm_dev;
        int                     pipe;
        bool                    interlace;
        bool                    powered;
@@ -90,6 +92,9 @@ struct mixer_context {
        struct mixer_resources  mixer_res;
        struct hdmi_win_data    win_data[MIXER_WIN_NR];
        enum mixer_version_id   mxr_ver;
+       void                    *parent_ctx;
+       wait_queue_head_t       wait_vsync_queue;
+       atomic_t                wait_vsync_event;
 };
 
 struct mixer_drv_data {
@@ -665,58 +670,22 @@ static void mixer_win_reset(struct mixer_context *ctx)
        spin_unlock_irqrestore(&res->reg_slock, flags);
 }
 
-static void mixer_poweron(struct mixer_context *ctx)
-{
-       struct mixer_resources *res = &ctx->mixer_res;
-
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-       mutex_lock(&ctx->mixer_mutex);
-       if (ctx->powered) {
-               mutex_unlock(&ctx->mixer_mutex);
-               return;
-       }
-       ctx->powered = true;
-       mutex_unlock(&ctx->mixer_mutex);
-
-       pm_runtime_get_sync(ctx->dev);
-
-       clk_enable(res->mixer);
-       if (ctx->vp_enabled) {
-               clk_enable(res->vp);
-               clk_enable(res->sclk_mixer);
-       }
-
-       mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
-       mixer_win_reset(ctx);
-}
-
-static void mixer_poweroff(struct mixer_context *ctx)
+static int mixer_iommu_on(void *ctx, bool enable)
 {
-       struct mixer_resources *res = &ctx->mixer_res;
-
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+       struct mixer_context *mdata = ctx;
+       struct drm_device *drm_dev;
 
-       mutex_lock(&ctx->mixer_mutex);
-       if (!ctx->powered)
-               goto out;
-       mutex_unlock(&ctx->mixer_mutex);
+       drm_hdmi_ctx = mdata->parent_ctx;
+       drm_dev = drm_hdmi_ctx->drm_dev;
 
-       ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+       if (is_drm_iommu_supported(drm_dev)) {
+               if (enable)
+                       return drm_iommu_attach_device(drm_dev, mdata->dev);
 
-       clk_disable(res->mixer);
-       if (ctx->vp_enabled) {
-               clk_disable(res->vp);
-               clk_disable(res->sclk_mixer);
+               drm_iommu_detach_device(drm_dev, mdata->dev);
        }
-
-       pm_runtime_put_sync(ctx->dev);
-
-       mutex_lock(&ctx->mixer_mutex);
-       ctx->powered = false;
-
-out:
-       mutex_unlock(&ctx->mixer_mutex);
+       return 0;
 }
 
 static int mixer_enable_vblank(void *ctx, int pipe)
@@ -746,39 +715,6 @@ static void mixer_disable_vblank(void *ctx)
        mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
-static void mixer_dpms(void *ctx, int mode)
-{
-       struct mixer_context *mixer_ctx = ctx;
-
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-       switch (mode) {
-       case DRM_MODE_DPMS_ON:
-               mixer_poweron(mixer_ctx);
-               break;
-       case DRM_MODE_DPMS_STANDBY:
-       case DRM_MODE_DPMS_SUSPEND:
-       case DRM_MODE_DPMS_OFF:
-               mixer_poweroff(mixer_ctx);
-               break;
-       default:
-               DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
-               break;
-       }
-}
-
-static void mixer_wait_for_vblank(void *ctx)
-{
-       struct mixer_context *mixer_ctx = ctx;
-       struct mixer_resources *res = &mixer_ctx->mixer_res;
-       int ret;
-
-       ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
-                               MXR_INT_STATUS_VSYNC), 50);
-       if (ret < 0)
-               DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
 static void mixer_win_mode_set(void *ctx,
                              struct exynos_drm_overlay *overlay)
 {
@@ -811,9 +747,7 @@ static void mixer_win_mode_set(void *ctx,
        win_data = &mixer_ctx->win_data[win];
 
        win_data->dma_addr = overlay->dma_addr[0];
-       win_data->vaddr = overlay->vaddr[0];
        win_data->chroma_dma_addr = overlay->dma_addr[1];
-       win_data->chroma_vaddr = overlay->vaddr[1];
        win_data->pixel_format = overlay->pixel_format;
        win_data->bpp = overlay->bpp;
 
@@ -845,6 +779,8 @@ static void mixer_win_commit(void *ctx, int win)
                vp_video_buffer(mixer_ctx, win);
        else
                mixer_graph_buffer(mixer_ctx, win);
+
+       mixer_ctx->win_data[win].enabled = true;
 }
 
 static void mixer_win_disable(void *ctx, int win)
@@ -855,6 +791,14 @@ static void mixer_win_disable(void *ctx, int win)
 
        DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
 
+       mutex_lock(&mixer_ctx->mixer_mutex);
+       if (!mixer_ctx->powered) {
+               mutex_unlock(&mixer_ctx->mixer_mutex);
+               mixer_ctx->win_data[win].resume = false;
+               return;
+       }
+       mutex_unlock(&mixer_ctx->mixer_mutex);
+
        spin_lock_irqsave(&res->reg_slock, flags);
        mixer_vsync_set_update(mixer_ctx, false);
 
@@ -862,16 +806,144 @@ static void mixer_win_disable(void *ctx, int win)
 
        mixer_vsync_set_update(mixer_ctx, true);
        spin_unlock_irqrestore(&res->reg_slock, flags);
+
+       mixer_ctx->win_data[win].enabled = false;
+}
+
+static void mixer_wait_for_vblank(void *ctx)
+{
+       struct mixer_context *mixer_ctx = ctx;
+
+       mutex_lock(&mixer_ctx->mixer_mutex);
+       if (!mixer_ctx->powered) {
+               mutex_unlock(&mixer_ctx->mixer_mutex);
+               return;
+       }
+       mutex_unlock(&mixer_ctx->mixer_mutex);
+
+       atomic_set(&mixer_ctx->wait_vsync_event, 1);
+
+       /*
+        * wait for MIXER to signal VSYNC interrupt or return after
+        * timeout which is set to 50ms (refresh rate of 20).
+        */
+       if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
+                               !atomic_read(&mixer_ctx->wait_vsync_event),
+                               DRM_HZ/20))
+               DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
+static void mixer_window_suspend(struct mixer_context *ctx)
+{
+       struct hdmi_win_data *win_data;
+       int i;
+
+       for (i = 0; i < MIXER_WIN_NR; i++) {
+               win_data = &ctx->win_data[i];
+               win_data->resume = win_data->enabled;
+               mixer_win_disable(ctx, i);
+       }
+       mixer_wait_for_vblank(ctx);
+}
+
+static void mixer_window_resume(struct mixer_context *ctx)
+{
+       struct hdmi_win_data *win_data;
+       int i;
+
+       for (i = 0; i < MIXER_WIN_NR; i++) {
+               win_data = &ctx->win_data[i];
+               win_data->enabled = win_data->resume;
+               win_data->resume = false;
+       }
+}
+
+static void mixer_poweron(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       mutex_lock(&ctx->mixer_mutex);
+       if (ctx->powered) {
+               mutex_unlock(&ctx->mixer_mutex);
+               return;
+       }
+       ctx->powered = true;
+       mutex_unlock(&ctx->mixer_mutex);
+
+       clk_enable(res->mixer);
+       if (ctx->vp_enabled) {
+               clk_enable(res->vp);
+               clk_enable(res->sclk_mixer);
+       }
+
+       mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+       mixer_win_reset(ctx);
+
+       mixer_window_resume(ctx);
+}
+
+static void mixer_poweroff(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       mutex_lock(&ctx->mixer_mutex);
+       if (!ctx->powered)
+               goto out;
+       mutex_unlock(&ctx->mixer_mutex);
+
+       mixer_window_suspend(ctx);
+
+       ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+
+       clk_disable(res->mixer);
+       if (ctx->vp_enabled) {
+               clk_disable(res->vp);
+               clk_disable(res->sclk_mixer);
+       }
+
+       mutex_lock(&ctx->mixer_mutex);
+       ctx->powered = false;
+
+out:
+       mutex_unlock(&ctx->mixer_mutex);
+}
+
+static void mixer_dpms(void *ctx, int mode)
+{
+       struct mixer_context *mixer_ctx = ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               if (pm_runtime_suspended(mixer_ctx->dev))
+                       pm_runtime_get_sync(mixer_ctx->dev);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               if (!pm_runtime_suspended(mixer_ctx->dev))
+                       pm_runtime_put_sync(mixer_ctx->dev);
+               break;
+       default:
+               DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+               break;
+       }
 }
 
 static struct exynos_mixer_ops mixer_ops = {
        /* manager */
+       .iommu_on               = mixer_iommu_on,
        .enable_vblank          = mixer_enable_vblank,
        .disable_vblank         = mixer_disable_vblank,
+       .wait_for_vblank        = mixer_wait_for_vblank,
        .dpms                   = mixer_dpms,
 
        /* overlay */
-       .wait_for_vblank        = mixer_wait_for_vblank,
        .win_mode_set           = mixer_win_mode_set,
        .win_commit             = mixer_win_commit,
        .win_disable            = mixer_win_disable,
@@ -884,7 +956,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
        struct drm_pending_vblank_event *e, *t;
        struct timeval now;
        unsigned long flags;
-       bool is_checked = false;
 
        spin_lock_irqsave(&drm_dev->event_lock, flags);
 
@@ -894,7 +965,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
                if (crtc != e->pipe)
                        continue;
 
-               is_checked = true;
                do_gettimeofday(&now);
                e->event.sequence = 0;
                e->event.tv_sec = now.tv_sec;
@@ -902,16 +972,9 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
 
                list_move_tail(&e->base.link, &e->base.file_priv->event_list);
                wake_up_interruptible(&e->base.file_priv->event_wait);
+               drm_vblank_put(drm_dev, crtc);
        }
 
-       if (is_checked)
-               /*
-                * call drm_vblank_put only in case that drm_vblank_get was
-                * called.
-                */
-               if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-                       drm_vblank_put(drm_dev, crtc);
-
        spin_unlock_irqrestore(&drm_dev->event_lock, flags);
 }
 
@@ -944,6 +1007,12 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
                drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
                mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+
+               /* set wait vsync event to zero and wake up queue. */
+               if (atomic_read(&ctx->wait_vsync_event)) {
+                       atomic_set(&ctx->wait_vsync_event, 0);
+                       DRM_WAKEUP(&ctx->wait_vsync_queue);
+               }
        }
 
 out:
@@ -971,57 +1040,45 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
 
        spin_lock_init(&mixer_res->reg_slock);
 
-       mixer_res->mixer = clk_get(dev, "mixer");
+       mixer_res->mixer = devm_clk_get(dev, "mixer");
        if (IS_ERR_OR_NULL(mixer_res->mixer)) {
                dev_err(dev, "failed to get clock 'mixer'\n");
-               ret = -ENODEV;
-               goto fail;
+               return -ENODEV;
        }
 
-       mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+       mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
        if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
                dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
-               ret = -ENODEV;
-               goto fail;
+               return -ENODEV;
        }
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(dev, "get memory resource failed.\n");
-               ret = -ENXIO;
-               goto fail;
+               return -ENXIO;
        }
 
        mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
                                                        resource_size(res));
        if (mixer_res->mixer_regs == NULL) {
                dev_err(dev, "register mapping failed.\n");
-               ret = -ENXIO;
-               goto fail;
+               return -ENXIO;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (res == NULL) {
                dev_err(dev, "get interrupt resource failed.\n");
-               ret = -ENXIO;
-               goto fail;
+               return -ENXIO;
        }
 
        ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
                                                        0, "drm_mixer", ctx);
        if (ret) {
                dev_err(dev, "request interrupt failed.\n");
-               goto fail;
+               return ret;
        }
        mixer_res->irq = res->start;
 
        return 0;
-
-fail:
-       if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
-               clk_put(mixer_res->sclk_hdmi);
-       if (!IS_ERR_OR_NULL(mixer_res->mixer))
-               clk_put(mixer_res->mixer);
-       return ret;
 }
 
 static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
@@ -1031,25 +1088,21 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
        struct device *dev = &pdev->dev;
        struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
        struct resource *res;
-       int ret;
 
-       mixer_res->vp = clk_get(dev, "vp");
+       mixer_res->vp = devm_clk_get(dev, "vp");
        if (IS_ERR_OR_NULL(mixer_res->vp)) {
                dev_err(dev, "failed to get clock 'vp'\n");
-               ret = -ENODEV;
-               goto fail;
+               return -ENODEV;
        }
-       mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+       mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
        if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
                dev_err(dev, "failed to get clock 'sclk_mixer'\n");
-               ret = -ENODEV;
-               goto fail;
+               return -ENODEV;
        }
-       mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+       mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
        if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
                dev_err(dev, "failed to get clock 'sclk_dac'\n");
-               ret = -ENODEV;
-               goto fail;
+               return -ENODEV;
        }
 
        if (mixer_res->sclk_hdmi)
@@ -1058,28 +1111,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res == NULL) {
                dev_err(dev, "get memory resource failed.\n");
-               ret = -ENXIO;
-               goto fail;
+               return -ENXIO;
        }
 
        mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
                                                        resource_size(res));
        if (mixer_res->vp_regs == NULL) {
                dev_err(dev, "register mapping failed.\n");
-               ret = -ENXIO;
-               goto fail;
+               return -ENXIO;
        }
 
        return 0;
-
-fail:
-       if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
-               clk_put(mixer_res->sclk_dac);
-       if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
-               clk_put(mixer_res->sclk_mixer);
-       if (!IS_ERR_OR_NULL(mixer_res->vp))
-               clk_put(mixer_res->vp);
-       return ret;
 }
 
 static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1149,9 +1191,12 @@ static int __devinit mixer_probe(struct platform_device *pdev)
        }
 
        ctx->dev = &pdev->dev;
+       ctx->parent_ctx = (void *)drm_hdmi_ctx;
        drm_hdmi_ctx->ctx = (void *)ctx;
        ctx->vp_enabled = drv->is_vp_enabled;
        ctx->mxr_ver = drv->version;
+       DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+       atomic_set(&ctx->wait_vsync_event, 0);
 
        platform_set_drvdata(pdev, drm_hdmi_ctx);
 
@@ -1202,13 +1247,66 @@ static int mixer_suspend(struct device *dev)
        struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
        struct mixer_context *ctx = drm_hdmi_ctx->ctx;
 
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (pm_runtime_suspended(dev)) {
+               DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+               return 0;
+       }
+
        mixer_poweroff(ctx);
 
        return 0;
 }
+
+static int mixer_resume(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+       struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (!pm_runtime_suspended(dev)) {
+               DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+               return 0;
+       }
+
+       mixer_poweron(ctx);
+
+       return 0;
+}
 #endif
 
-static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
+#ifdef CONFIG_PM_RUNTIME
+static int mixer_runtime_suspend(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+       struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       mixer_poweroff(ctx);
+
+       return 0;
+}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+       struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       mixer_poweron(ctx);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops mixer_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
+       SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
+};
 
 struct platform_driver mixer_driver = {
        .driver = {
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644 (file)
index 0000000..b4f9ca1
--- /dev/null
@@ -0,0 +1,669 @@
+/* drivers/gpu/drm/exynos/regs-fimc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * Register definition file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_FIMC_H
+#define EXYNOS_REGS_FIMC_H
+
+/*
+ * Register part
+*/
+/* Input source format */
+#define EXYNOS_CISRCFMT                (0x00)
+/* Window offset */
+#define EXYNOS_CIWDOFST                (0x04)
+/* Global control */
+#define EXYNOS_CIGCTRL         (0x08)
+/* Window offset 2 */
+#define EXYNOS_CIWDOFST2       (0x14)
+/* Y 1st frame start address for output DMA */
+#define EXYNOS_CIOYSA1         (0x18)
+/* Y 2nd frame start address for output DMA */
+#define EXYNOS_CIOYSA2         (0x1c)
+/* Y 3rd frame start address for output DMA */
+#define EXYNOS_CIOYSA3         (0x20)
+/* Y 4th frame start address for output DMA */
+#define EXYNOS_CIOYSA4         (0x24)
+/* Cb 1st frame start address for output DMA */
+#define EXYNOS_CIOCBSA1                (0x28)
+/* Cb 2nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA2                (0x2c)
+/* Cb 3rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA3                (0x30)
+/* Cb 4th frame start address for output DMA */
+#define EXYNOS_CIOCBSA4                (0x34)
+/* Cr 1st frame start address for output DMA */
+#define EXYNOS_CIOCRSA1                (0x38)
+/* Cr 2nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA2                (0x3c)
+/* Cr 3rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA3                (0x40)
+/* Cr 4th frame start address for output DMA */
+#define EXYNOS_CIOCRSA4                (0x44)
+/* Target image format */
+#define EXYNOS_CITRGFMT                (0x48)
+/* Output DMA control */
+#define EXYNOS_CIOCTRL         (0x4c)
+/* Pre-scaler control 1 */
+#define EXYNOS_CISCPRERATIO    (0x50)
+/* Pre-scaler control 2 */
+#define EXYNOS_CISCPREDST              (0x54)
+/* Main scaler control */
+#define EXYNOS_CISCCTRL                (0x58)
+/* Target area */
+#define EXYNOS_CITAREA         (0x5c)
+/* Status */
+#define EXYNOS_CISTATUS                (0x64)
+/* Status2 */
+#define EXYNOS_CISTATUS2               (0x68)
+/* Image capture enable command */
+#define EXYNOS_CIIMGCPT                (0xc0)
+/* Capture sequence */
+#define EXYNOS_CICPTSEQ                (0xc4)
+/* Image effects */
+#define EXYNOS_CIIMGEFF                (0xd0)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA0         (0xd4)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA0                (0xd8)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA0                (0xdc)
+/* Input DMA Y Line Skip */
+#define EXYNOS_CIILINESKIP_Y   (0xec)
+/* Input DMA Cb Line Skip */
+#define EXYNOS_CIILINESKIP_CB  (0xf0)
+/* Input DMA Cr Line Skip */
+#define EXYNOS_CIILINESKIP_CR  (0xf4)
+/* Real input DMA image size */
+#define EXYNOS_CIREAL_ISIZE    (0xf8)
+/* Input DMA control */
+#define EXYNOS_MSCTRL          (0xfc)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA1         (0x144)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA1                (0x148)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA1                (0x14c)
+/* Output DMA Y offset */
+#define EXYNOS_CIOYOFF         (0x168)
+/* Output DMA CB offset */
+#define EXYNOS_CIOCBOFF                (0x16c)
+/* Output DMA CR offset */
+#define EXYNOS_CIOCROFF                (0x170)
+/* Input DMA Y offset */
+#define EXYNOS_CIIYOFF         (0x174)
+/* Input DMA CB offset */
+#define EXYNOS_CIICBOFF                (0x178)
+/* Input DMA CR offset */
+#define EXYNOS_CIICROFF                (0x17c)
+/* Input DMA original image size */
+#define EXYNOS_ORGISIZE                (0x180)
+/* Output DMA original image size */
+#define EXYNOS_ORGOSIZE                (0x184)
+/* Real output DMA image size */
+#define EXYNOS_CIEXTEN         (0x188)
+/* DMA parameter */
+#define EXYNOS_CIDMAPARAM              (0x18c)
+/* MIPI CSI image format */
+#define EXYNOS_CSIIMGFMT               (0x194)
+/* FIMC Clock Source Select */
+#define EXYNOS_MISC_FIMC               (0x198)
+
+/* Add for FIMC v5.1 */
+/* Output Frame Buffer Sequence */
+#define EXYNOS_CIFCNTSEQ               (0x1fc)
+/* Y 5th frame start address for output DMA */
+#define EXYNOS_CIOYSA5         (0x200)
+/* Y 6th frame start address for output DMA */
+#define EXYNOS_CIOYSA6         (0x204)
+/* Y 7th frame start address for output DMA */
+#define EXYNOS_CIOYSA7         (0x208)
+/* Y 8th frame start address for output DMA */
+#define EXYNOS_CIOYSA8         (0x20c)
+/* Y 9th frame start address for output DMA */
+#define EXYNOS_CIOYSA9         (0x210)
+/* Y 10th frame start address for output DMA */
+#define EXYNOS_CIOYSA10                (0x214)
+/* Y 11th frame start address for output DMA */
+#define EXYNOS_CIOYSA11                (0x218)
+/* Y 12th frame start address for output DMA */
+#define EXYNOS_CIOYSA12                (0x21c)
+/* Y 13th frame start address for output DMA */
+#define EXYNOS_CIOYSA13                (0x220)
+/* Y 14th frame start address for output DMA */
+#define EXYNOS_CIOYSA14                (0x224)
+/* Y 15th frame start address for output DMA */
+#define EXYNOS_CIOYSA15                (0x228)
+/* Y 16th frame start address for output DMA */
+#define EXYNOS_CIOYSA16                (0x22c)
+/* Y 17th frame start address for output DMA */
+#define EXYNOS_CIOYSA17                (0x230)
+/* Y 18th frame start address for output DMA */
+#define EXYNOS_CIOYSA18                (0x234)
+/* Y 19th frame start address for output DMA */
+#define EXYNOS_CIOYSA19                (0x238)
+/* Y 20th frame start address for output DMA */
+#define EXYNOS_CIOYSA20                (0x23c)
+/* Y 21st frame start address for output DMA */
+#define EXYNOS_CIOYSA21                (0x240)
+/* Y 22nd frame start address for output DMA */
+#define EXYNOS_CIOYSA22                (0x244)
+/* Y 23rd frame start address for output DMA */
+#define EXYNOS_CIOYSA23                (0x248)
+/* Y 24th frame start address for output DMA */
+#define EXYNOS_CIOYSA24                (0x24c)
+/* Y 25th frame start address for output DMA */
+#define EXYNOS_CIOYSA25                (0x250)
+/* Y 26th frame start address for output DMA */
+#define EXYNOS_CIOYSA26                (0x254)
+/* Y 27th frame start address for output DMA */
+#define EXYNOS_CIOYSA27                (0x258)
+/* Y 28th frame start address for output DMA */
+#define EXYNOS_CIOYSA28                (0x25c)
+/* Y 29th frame start address for output DMA */
+#define EXYNOS_CIOYSA29                (0x260)
+/* Y 30th frame start address for output DMA */
+#define EXYNOS_CIOYSA30                (0x264)
+/* Y 31st frame start address for output DMA */
+#define EXYNOS_CIOYSA31                (0x268)
+/* Y 32nd frame start address for output DMA */
+#define EXYNOS_CIOYSA32                (0x26c)
+
+/* CB 5th frame start address for output DMA */
+#define EXYNOS_CIOCBSA5                (0x270)
+/* CB 6th frame start address for output DMA */
+#define EXYNOS_CIOCBSA6                (0x274)
+/* CB 7th frame start address for output DMA */
+#define EXYNOS_CIOCBSA7                (0x278)
+/* CB 8th frame start address for output DMA */
+#define EXYNOS_CIOCBSA8                (0x27c)
+/* CB 9th frame start address for output DMA */
+#define EXYNOS_CIOCBSA9                (0x280)
+/* CB 10th frame start address for output DMA */
+#define EXYNOS_CIOCBSA10               (0x284)
+/* CB 11th frame start address for output DMA */
+#define EXYNOS_CIOCBSA11               (0x288)
+/* CB 12th frame start address for output DMA */
+#define EXYNOS_CIOCBSA12               (0x28c)
+/* CB 13th frame start address for output DMA */
+#define EXYNOS_CIOCBSA13               (0x290)
+/* CB 14th frame start address for output DMA */
+#define EXYNOS_CIOCBSA14               (0x294)
+/* CB 15th frame start address for output DMA */
+#define EXYNOS_CIOCBSA15               (0x298)
+/* CB 16th frame start address for output DMA */
+#define EXYNOS_CIOCBSA16               (0x29c)
+/* CB 17th frame start address for output DMA */
+#define EXYNOS_CIOCBSA17               (0x2a0)
+/* CB 18th frame start address for output DMA */
+#define EXYNOS_CIOCBSA18               (0x2a4)
+/* CB 19th frame start address for output DMA */
+#define EXYNOS_CIOCBSA19               (0x2a8)
+/* CB 20th frame start address for output DMA */
+#define EXYNOS_CIOCBSA20               (0x2ac)
+/* CB 21st frame start address for output DMA */
+#define EXYNOS_CIOCBSA21               (0x2b0)
+/* CB 22nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA22               (0x2b4)
+/* CB 23rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA23               (0x2b8)
+/* CB 24th frame start address for output DMA */
+#define EXYNOS_CIOCBSA24               (0x2bc)
+/* CB 25th frame start address for output DMA */
+#define EXYNOS_CIOCBSA25               (0x2c0)
+/* CB 26th frame start address for output DMA */
+#define EXYNOS_CIOCBSA26               (0x2c4)
+/* CB 27th frame start address for output DMA */
+#define EXYNOS_CIOCBSA27               (0x2c8)
+/* CB 28th frame start address for output DMA */
+#define EXYNOS_CIOCBSA28               (0x2cc)
+/* CB 29th frame start address for output DMA */
+#define EXYNOS_CIOCBSA29               (0x2d0)
+/* CB 30th frame start address for output DMA */
+#define EXYNOS_CIOCBSA30               (0x2d4)
+/* CB 31st frame start address for output DMA */
+#define EXYNOS_CIOCBSA31               (0x2d8)
+/* CB 32nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA32               (0x2dc)
+
+/* CR 5th frame start address for output DMA */
+#define EXYNOS_CIOCRSA5                (0x2e0)
+/* CR 6th frame start address for output DMA */
+#define EXYNOS_CIOCRSA6                (0x2e4)
+/* CR 7th frame start address for output DMA */
+#define EXYNOS_CIOCRSA7                (0x2e8)
+/* CR 8th frame start address for output DMA */
+#define EXYNOS_CIOCRSA8                (0x2ec)
+/* CR 9th frame start address for output DMA */
+#define EXYNOS_CIOCRSA9                (0x2f0)
+/* CR 10th frame start address for output DMA */
+#define EXYNOS_CIOCRSA10               (0x2f4)
+/* CR 11th frame start address for output DMA */
+#define EXYNOS_CIOCRSA11               (0x2f8)
+/* CR 12th frame start address for output DMA */
+#define EXYNOS_CIOCRSA12               (0x2fc)
+/* CR 13th frame start address for output DMA */
+#define EXYNOS_CIOCRSA13               (0x300)
+/* CR 14th frame start address for output DMA */
+#define EXYNOS_CIOCRSA14               (0x304)
+/* CR 15th frame start address for output DMA */
+#define EXYNOS_CIOCRSA15               (0x308)
+/* CR 16th frame start address for output DMA */
+#define EXYNOS_CIOCRSA16               (0x30c)
+/* CR 17th frame start address for output DMA */
+#define EXYNOS_CIOCRSA17               (0x310)
+/* CR 18th frame start address for output DMA */
+#define EXYNOS_CIOCRSA18               (0x314)
+/* CR 19th frame start address for output DMA */
+#define EXYNOS_CIOCRSA19               (0x318)
+/* CR 20th frame start address for output DMA */
+#define EXYNOS_CIOCRSA20               (0x31c)
+/* CR 21st frame start address for output DMA */
+#define EXYNOS_CIOCRSA21               (0x320)
+/* CR 22nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA22               (0x324)
+/* CR 23rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA23               (0x328)
+/* CR 24th frame start address for output DMA */
+#define EXYNOS_CIOCRSA24               (0x32c)
+/* CR 25th frame start address for output DMA */
+#define EXYNOS_CIOCRSA25               (0x330)
+/* CR 26th frame start address for output DMA */
+#define EXYNOS_CIOCRSA26               (0x334)
+/* CR 27th frame start address for output DMA */
+#define EXYNOS_CIOCRSA27               (0x338)
+/* CR 28th frame start address for output DMA */
+#define EXYNOS_CIOCRSA28               (0x33c)
+/* CR 29th frame start address for output DMA */
+#define EXYNOS_CIOCRSA29               (0x340)
+/* CR 30th frame start address for output DMA */
+#define EXYNOS_CIOCRSA30               (0x344)
+/* CR 31st frame start address for output DMA */
+#define EXYNOS_CIOCRSA31               (0x348)
+/* CR 32nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA32               (0x34c)
+
+/*
+ * Macro part
+*/
+/* frame start address 1 ~ 4, 5 ~ 32 */
+/* Number of Default PingPong Memory */
+#define DEF_PP         4
+#define EXYNOS_CIOYSA(__x)             \
+       (((__x) < DEF_PP) ?     \
+        (EXYNOS_CIOYSA1  + (__x) * 4) : \
+       (EXYNOS_CIOYSA5  + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCBSA(__x)    \
+       (((__x) < DEF_PP) ?     \
+        (EXYNOS_CIOCBSA1 + (__x) * 4) : \
+       (EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCRSA(__x)    \
+       (((__x) < DEF_PP) ?     \
+        (EXYNOS_CIOCRSA1 + (__x) * 4) : \
+       (EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
+/* Number of Default PingPong Memory */
+#define DEF_IPP                1
+#define EXYNOS_CIIYSA(__x)             \
+       (((__x) < DEF_IPP) ?    \
+        (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
+#define EXYNOS_CIICBSA(__x)    \
+       (((__x) < DEF_IPP) ?    \
+        (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
+#define EXYNOS_CIICRSA(__x)    \
+       (((__x) < DEF_IPP) ?    \
+        (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
+
+#define EXYNOS_CISRCFMT_SOURCEHSIZE(x)         ((x) << 16)
+#define EXYNOS_CISRCFMT_SOURCEVSIZE(x)         ((x) << 0)
+
+#define EXYNOS_CIWDOFST_WINHOROFST(x)          ((x) << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST(x)          ((x) << 0)
+
+#define EXYNOS_CIWDOFST2_WINHOROFST2(x)                ((x) << 16)
+#define EXYNOS_CIWDOFST2_WINVEROFST2(x)                ((x) << 0)
+
+#define EXYNOS_CITRGFMT_TARGETHSIZE(x)         (((x) & 0x1fff) << 16)
+#define EXYNOS_CITRGFMT_TARGETVSIZE(x)         (((x) & 0x1fff) << 0)
+
+#define EXYNOS_CISCPRERATIO_SHFACTOR(x)                ((x) << 28)
+#define EXYNOS_CISCPRERATIO_PREHORRATIO(x)             ((x) << 16)
+#define EXYNOS_CISCPRERATIO_PREVERRATIO(x)             ((x) << 0)
+
+#define EXYNOS_CISCPREDST_PREDSTWIDTH(x)               ((x) << 16)
+#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x)              ((x) << 0)
+
+#define EXYNOS_CISCCTRL_MAINHORRATIO(x)                ((x) << 16)
+#define EXYNOS_CISCCTRL_MAINVERRATIO(x)                ((x) << 0)
+
+#define EXYNOS_CITAREA_TARGET_AREA(x)          ((x) << 0)
+
+#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x)             (((x) >> 26) & 0x3)
+#define EXYNOS_CISTATUS_GET_FRAME_END(x)               (((x) >> 17) & 0x1)
+#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x)        (((x) >> 16) & 0x1)
+#define EXYNOS_CISTATUS_GET_LCD_STATUS(x)              (((x) >> 9) & 0x1)
+#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x)    (((x) >> 8) & 0x1)
+
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x)      (((x) >> 7) & 0x3f)
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x)     ((x) & 0x3f)
+
+#define EXYNOS_CIIMGEFF_FIN(x)                 ((x & 0x7) << 26)
+#define EXYNOS_CIIMGEFF_PAT_CB(x)                      ((x) << 13)
+#define EXYNOS_CIIMGEFF_PAT_CR(x)                      ((x) << 0)
+
+#define EXYNOS_CIILINESKIP(x)                  (((x) & 0xf) << 24)
+
+#define EXYNOS_CIREAL_ISIZE_HEIGHT(x)          ((x) << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH(x)           ((x) << 0)
+
+#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x)              ((x) << 24)
+#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x)              ((x) & 0x1)
+
+#define EXYNOS_CIOYOFF_VERTICAL(x)                     ((x) << 16)
+#define EXYNOS_CIOYOFF_HORIZONTAL(x)           ((x) << 0)
+
+#define EXYNOS_CIOCBOFF_VERTICAL(x)            ((x) << 16)
+#define EXYNOS_CIOCBOFF_HORIZONTAL(x)          ((x) << 0)
+
+#define EXYNOS_CIOCROFF_VERTICAL(x)            ((x) << 16)
+#define EXYNOS_CIOCROFF_HORIZONTAL(x)          ((x) << 0)
+
+#define EXYNOS_CIIYOFF_VERTICAL(x)                     ((x) << 16)
+#define EXYNOS_CIIYOFF_HORIZONTAL(x)           ((x) << 0)
+
+#define EXYNOS_CIICBOFF_VERTICAL(x)            ((x) << 16)
+#define EXYNOS_CIICBOFF_HORIZONTAL(x)          ((x) << 0)
+
+#define EXYNOS_CIICROFF_VERTICAL(x)            ((x) << 16)
+#define EXYNOS_CIICROFF_HORIZONTAL(x)          ((x) << 0)
+
+#define EXYNOS_ORGISIZE_VERTICAL(x)            ((x) << 16)
+#define EXYNOS_ORGISIZE_HORIZONTAL(x)          ((x) << 0)
+
+#define EXYNOS_ORGOSIZE_VERTICAL(x)            ((x) << 16)
+#define EXYNOS_ORGOSIZE_HORIZONTAL(x)          ((x) << 0)
+
+#define EXYNOS_CIEXTEN_TARGETH_EXT(x)          ((((x) & 0x2000) >> 13) << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT(x)          ((((x) & 0x2000) >> 13) << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x)             (((x) & 0x3F) << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x)             ((x) & 0x3F)
+
+/*
+ * Bit definition part
+*/
+/* Source format register */
+#define EXYNOS_CISRCFMT_ITU601_8BIT            (1 << 31)
+#define EXYNOS_CISRCFMT_ITU656_8BIT            (0 << 31)
+#define EXYNOS_CISRCFMT_ITU601_16BIT           (1 << 29)
+#define EXYNOS_CISRCFMT_ORDER422_YCBYCR                (0 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_YCRYCB                (1 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CBYCRY                (2 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CRYCBY                (3 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR    (0 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB    (1 << 14)
+
+/* Window offset register */
+#define EXYNOS_CIWDOFST_WINOFSEN                       (1 << 31)
+#define EXYNOS_CIWDOFST_CLROVFIY                       (1 << 30)
+#define EXYNOS_CIWDOFST_CLROVRLB                       (1 << 29)
+#define EXYNOS_CIWDOFST_WINHOROFST_MASK                (0x7ff << 16)
+#define EXYNOS_CIWDOFST_CLROVFICB                      (1 << 15)
+#define EXYNOS_CIWDOFST_CLROVFICR                      (1 << 14)
+#define EXYNOS_CIWDOFST_WINVEROFST_MASK                (0xfff << 0)
+
+/* Global control register */
+#define EXYNOS_CIGCTRL_SWRST                   (1 << 31)
+#define EXYNOS_CIGCTRL_CAMRST_A                        (1 << 30)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_B            (0 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_A            (1 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK         (1 << 29)
+#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL              (0 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR   (1 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC             (2 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC             (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_MASK                (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT               (27)
+#define EXYNOS_CIGCTRL_INVPOLPCLK                      (1 << 26)
+#define EXYNOS_CIGCTRL_INVPOLVSYNC                     (1 << 25)
+#define EXYNOS_CIGCTRL_INVPOLHREF                      (1 << 24)
+#define EXYNOS_CIGCTRL_IRQ_OVFEN                       (1 << 22)
+#define EXYNOS_CIGCTRL_HREF_MASK                       (1 << 21)
+#define EXYNOS_CIGCTRL_IRQ_EDGE                        (0 << 20)
+#define EXYNOS_CIGCTRL_IRQ_LEVEL                       (1 << 20)
+#define EXYNOS_CIGCTRL_IRQ_CLR                 (1 << 19)
+#define EXYNOS_CIGCTRL_IRQ_END_DISABLE         (1 << 18)
+#define EXYNOS_CIGCTRL_IRQ_DISABLE                     (0 << 16)
+#define EXYNOS_CIGCTRL_IRQ_ENABLE                      (1 << 16)
+#define EXYNOS_CIGCTRL_SHADOW_DISABLE          (1 << 12)
+#define EXYNOS_CIGCTRL_CAM_JPEG                        (1 << 8)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_B           (0 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_A           (1 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK                (1 << 7)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA      (0 << 6)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK   (1 << 6)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK               (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_A          (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_B          (0 << 10)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK                (1 << 6)
+#define EXYNOS_CIGCTRL_CSC_ITU601                      (0 << 5)
+#define EXYNOS_CIGCTRL_CSC_ITU709                      (1 << 5)
+#define EXYNOS_CIGCTRL_CSC_MASK                        (1 << 5)
+#define EXYNOS_CIGCTRL_INVPOLHSYNC                     (1 << 4)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU         (0 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI                (1 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK                (1 << 3)
+#define EXYNOS_CIGCTRL_PROGRESSIVE                     (0 << 0)
+#define EXYNOS_CIGCTRL_INTERLACE                       (1 << 0)
+
+/* Window offset2 register */
+#define EXYNOS_CIWDOFST_WINHOROFST2_MASK               (0xfff << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST2_MASK               (0xfff << 16)
+
+/* Target format register */
+#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE              (1 << 31)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420             (0 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422             (1 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE      (2 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_RGB          (3 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_MASK         (3 << 29)
+#define EXYNOS_CITRGFMT_FLIP_SHIFT                     (14)
+#define EXYNOS_CITRGFMT_FLIP_NORMAL            (0 << 14)
+#define EXYNOS_CITRGFMT_FLIP_X_MIRROR          (1 << 14)
+#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR          (2 << 14)
+#define EXYNOS_CITRGFMT_FLIP_180                       (3 << 14)
+#define EXYNOS_CITRGFMT_FLIP_MASK                      (3 << 14)
+#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE             (1 << 13)
+#define EXYNOS_CITRGFMT_TARGETV_MASK           (0x1fff << 0)
+#define EXYNOS_CITRGFMT_TARGETH_MASK           (0x1fff << 16)
+
+/* Output DMA control register */
+#define EXYNOS_CIOCTRL_WEAVE_OUT                       (1 << 31)
+#define EXYNOS_CIOCTRL_WEAVE_MASK                      (1 << 31)
+#define EXYNOS_CIOCTRL_LASTENDEN                       (1 << 30)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR                (0 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB                (1 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB                (2 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR                (3 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_SHIFT           (24)
+#define EXYNOS_CIOCTRL_ORDER2P_MASK            (3 << 24)
+#define EXYNOS_CIOCTRL_YCBCR_3PLANE            (0 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_2PLANE            (1 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK                (1 << 3)
+#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE          (1 << 2)
+#define EXYNOS_CIOCTRL_ALPHA_OUT                       (0xff << 4)
+#define EXYNOS_CIOCTRL_ORDER422_YCBYCR         (0 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_YCRYCB         (1 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CBYCRY         (2 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CRYCBY         (3 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_MASK           (3 << 0)
+
+/* Main scaler control register */
+#define EXYNOS_CISCCTRL_SCALERBYPASS           (1 << 31)
+#define EXYNOS_CISCCTRL_SCALEUP_H                      (1 << 30)
+#define EXYNOS_CISCCTRL_SCALEUP_V                      (1 << 29)
+#define EXYNOS_CISCCTRL_CSCR2Y_NARROW          (0 << 28)
+#define EXYNOS_CISCCTRL_CSCR2Y_WIDE            (1 << 28)
+#define EXYNOS_CISCCTRL_CSCY2R_NARROW          (0 << 27)
+#define EXYNOS_CISCCTRL_CSCY2R_WIDE            (1 << 27)
+#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO         (1 << 26)
+#define EXYNOS_CISCCTRL_PROGRESSIVE            (0 << 25)
+#define EXYNOS_CISCCTRL_INTERLACE                      (1 << 25)
+#define EXYNOS_CISCCTRL_SCAN_MASK                      (1 << 25)
+#define EXYNOS_CISCCTRL_SCALERSTART            (1 << 15)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565               (0 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666               (1 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888               (2 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK             (3 << 13)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565              (0 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666              (1 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888              (2 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK    (3 << 11)
+#define EXYNOS_CISCCTRL_EXTRGB_NORMAL          (0 << 10)
+#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION               (1 << 10)
+#define EXYNOS_CISCCTRL_ONE2ONE                        (1 << 9)
+#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK              (0x1ff << 0)
+#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK              (0x1ff << 16)
+
+/* Status register */
+#define EXYNOS_CISTATUS_OVFIY                  (1 << 31)
+#define EXYNOS_CISTATUS_OVFICB                 (1 << 30)
+#define EXYNOS_CISTATUS_OVFICR                 (1 << 29)
+#define EXYNOS_CISTATUS_VSYNC                  (1 << 28)
+#define EXYNOS_CISTATUS_SCALERSTART            (1 << 26)
+#define EXYNOS_CISTATUS_WINOFSTEN                      (1 << 25)
+#define EXYNOS_CISTATUS_IMGCPTEN                       (1 << 22)
+#define EXYNOS_CISTATUS_IMGCPTENSC                     (1 << 21)
+#define EXYNOS_CISTATUS_VSYNC_A                        (1 << 20)
+#define EXYNOS_CISTATUS_VSYNC_B                        (1 << 19)
+#define EXYNOS_CISTATUS_OVRLB                  (1 << 18)
+#define EXYNOS_CISTATUS_FRAMEEND                       (1 << 17)
+#define EXYNOS_CISTATUS_LASTCAPTUREEND         (1 << 16)
+#define EXYNOS_CISTATUS_VVALID_A                       (1 << 15)
+#define EXYNOS_CISTATUS_VVALID_B                       (1 << 14)
+
+/* Image capture enable register */
+#define EXYNOS_CIIMGCPT_IMGCPTEN                       (1 << 31)
+#define EXYNOS_CIIMGCPT_IMGCPTEN_SC            (1 << 30)
+#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE                (1 << 25)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN           (0 << 18)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT          (1 << 18)
+
+/* Image effects register */
+#define EXYNOS_CIIMGEFF_IE_DISABLE                     (0 << 30)
+#define EXYNOS_CIIMGEFF_IE_ENABLE                      (1 << 30)
+#define EXYNOS_CIIMGEFF_IE_SC_BEFORE           (0 << 29)
+#define EXYNOS_CIIMGEFF_IE_SC_AFTER            (1 << 29)
+#define EXYNOS_CIIMGEFF_FIN_BYPASS                     (0 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARBITRARY          (1 << 26)
+#define EXYNOS_CIIMGEFF_FIN_NEGATIVE           (2 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE          (3 << 26)
+#define EXYNOS_CIIMGEFF_FIN_EMBOSSING          (4 << 26)
+#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE         (5 << 26)
+#define EXYNOS_CIIMGEFF_FIN_MASK                       (7 << 26)
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK          ((0xff << 13) | (0xff << 0))
+
+/* Real input DMA size register */
+#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE    (1 << 31)
+#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE    (1 << 30)
+#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK                (0x3FFF << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK         (0x3FFF << 0)
+
+/* Input DMA control register */
+#define EXYNOS_MSCTRL_FIELD_MASK                       (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_WEAVE                      (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_NORMAL                     (0 << 31)
+#define EXYNOS_MSCTRL_BURST_CNT                        (24)
+#define EXYNOS_MSCTRL_BURST_CNT_MASK           (0xf << 24)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR         (0 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB         (1 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB         (2 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR         (3 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT            (16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK               (0x3 << 16)
+#define EXYNOS_MSCTRL_C_INT_IN_3PLANE          (0 << 15)
+#define EXYNOS_MSCTRL_C_INT_IN_2PLANE          (1 << 15)
+#define EXYNOS_MSCTRL_FLIP_SHIFT                       (13)
+#define EXYNOS_MSCTRL_FLIP_NORMAL                      (0 << 13)
+#define EXYNOS_MSCTRL_FLIP_X_MIRROR            (1 << 13)
+#define EXYNOS_MSCTRL_FLIP_Y_MIRROR            (2 << 13)
+#define EXYNOS_MSCTRL_FLIP_180                 (3 << 13)
+#define EXYNOS_MSCTRL_FLIP_MASK                        (3 << 13)
+#define EXYNOS_MSCTRL_ORDER422_CRYCBY          (0 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCRYCB          (1 << 4)
+#define EXYNOS_MSCTRL_ORDER422_CBYCRY          (2 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCBYCR          (3 << 4)
+#define EXYNOS_MSCTRL_INPUT_EXTCAM                     (0 << 3)
+#define EXYNOS_MSCTRL_INPUT_MEMORY                     (1 << 3)
+#define EXYNOS_MSCTRL_INPUT_MASK                       (1 << 3)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR420                (0 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422                (1 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_RGB                     (3 << 1)
+#define EXYNOS_MSCTRL_ENVID                    (1 << 0)
+
+/* DMA parameter register */
+#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR                (0 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE              (1 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_16X16         (2 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_64X32         (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_MASK          (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64              (0 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128             (1 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256             (2 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512             (3 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024    (4 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048    (5 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096    (6 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1               (0 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2               (1 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4               (2 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8               (3 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16              (4 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32              (5 << 20)
+#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR                (0 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE              (1 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_16X16         (2 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_64X32         (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_MASK          (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64              (0 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128             (1 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256             (2 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512             (3 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024    (4 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048    (5 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096    (6 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1               (0 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2               (1 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4               (2 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8               (3 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16              (4 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32              (5 << 4)
+
+/* Gathering Extension register */
+#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK                (1 << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK                (1 << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK   (0x3F << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK   (0x3F)
+#define EXYNOS_CIEXTEN_YUV444_OUT                      (1 << 22)
+
+/* FIMC Clock Source Select register */
+#define EXYNOS_CLKSRC_HCLK                             (0 << 1)
+#define EXYNOS_CLKSRC_HCLK_MASK                        (1 << 1)
+#define EXYNOS_CLKSRC_SCLK                             (1 << 1)
+
+/* SYSREG for FIMC writeback */
+#define SYSREG_CAMERA_BLK                      (S3C_VA_SYS + 0x0218)
+#define SYSREG_ISP_BLK                         (S3C_VA_SYS + 0x020c)
+#define SYSREG_FIMD0WB_DEST_MASK       (0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT      23
+
+#endif /* EXYNOS_REGS_FIMC_H */
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644 (file)
index 0000000..9ad5927
--- /dev/null
@@ -0,0 +1,284 @@
+/* linux/drivers/gpu/drm/exynos/regs-gsc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_GSC_H_
+#define EXYNOS_REGS_GSC_H_
+
+/* G-Scaler enable */
+#define GSC_ENABLE                     0x00
+#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS        (1 << 9)
+#define GSC_ENABLE_CLK_GATE_MODE_MASK  (1 << 8)
+#define GSC_ENABLE_CLK_GATE_MODE_FREE  (1 << 8)
+#define GSC_ENABLE_IPC_MODE_MASK       (1 << 7)
+#define GSC_ENABLE_NORM_MODE           (0 << 7)
+#define GSC_ENABLE_IPC_MODE            (1 << 7)
+#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
+#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
+#define GSC_ENABLE_IN_PP_UPDATE                (1 << 5)
+#define GSC_ENABLE_ON_CLEAR_MASK       (1 << 4)
+#define GSC_ENABLE_ON_CLEAR_ONESHOT    (1 << 4)
+#define GSC_ENABLE_QOS_ENABLE          (1 << 3)
+#define GSC_ENABLE_OP_STATUS           (1 << 2)
+#define GSC_ENABLE_SFR_UPDATE          (1 << 1)
+#define GSC_ENABLE_ON                  (1 << 0)
+
+/* G-Scaler S/W reset */
+#define GSC_SW_RESET                   0x04
+#define GSC_SW_RESET_SRESET            (1 << 0)
+
+/* G-Scaler IRQ */
+#define GSC_IRQ                                0x08
+#define GSC_IRQ_STATUS_OR_IRQ          (1 << 17)
+#define GSC_IRQ_STATUS_OR_FRM_DONE     (1 << 16)
+#define GSC_IRQ_OR_MASK                        (1 << 2)
+#define GSC_IRQ_FRMDONE_MASK           (1 << 1)
+#define GSC_IRQ_ENABLE                 (1 << 0)
+
+/* G-Scaler input control */
+#define GSC_IN_CON                     0x10
+#define GSC_IN_CHROM_STRIDE_SEL_MASK   (1 << 20)
+#define GSC_IN_CHROM_STRIDE_SEPAR      (1 << 20)
+#define GSC_IN_RB_SWAP_MASK            (1 << 19)
+#define GSC_IN_RB_SWAP                 (1 << 19)
+#define GSC_IN_ROT_MASK                        (7 << 16)
+#define GSC_IN_ROT_270                 (7 << 16)
+#define GSC_IN_ROT_90_YFLIP            (6 << 16)
+#define GSC_IN_ROT_90_XFLIP            (5 << 16)
+#define GSC_IN_ROT_90                  (4 << 16)
+#define GSC_IN_ROT_180                 (3 << 16)
+#define GSC_IN_ROT_YFLIP               (2 << 16)
+#define GSC_IN_ROT_XFLIP               (1 << 16)
+#define GSC_IN_RGB_TYPE_MASK           (3 << 14)
+#define GSC_IN_RGB_HD_WIDE             (3 << 14)
+#define GSC_IN_RGB_HD_NARROW           (2 << 14)
+#define GSC_IN_RGB_SD_WIDE             (1 << 14)
+#define GSC_IN_RGB_SD_NARROW           (0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK    (1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y   (0 << 13)
+#define GSC_IN_YUV422_1P_OEDER_LSB_C   (1 << 13)
+#define GSC_IN_CHROMA_ORDER_MASK       (1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR       (0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB       (1 << 12)
+#define GSC_IN_FORMAT_MASK             (7 << 8)
+#define GSC_IN_XRGB8888                        (0 << 8)
+#define GSC_IN_RGB565                  (1 << 8)
+#define GSC_IN_YUV420_2P               (2 << 8)
+#define GSC_IN_YUV420_3P               (3 << 8)
+#define GSC_IN_YUV422_1P               (4 << 8)
+#define GSC_IN_YUV422_2P               (5 << 8)
+#define GSC_IN_YUV422_3P               (6 << 8)
+#define GSC_IN_TILE_TYPE_MASK          (1 << 4)
+#define GSC_IN_TILE_C_16x8             (0 << 4)
+#define GSC_IN_TILE_C_16x16            (1 << 4)
+#define GSC_IN_TILE_MODE               (1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK          (3 << 1)
+#define GSC_IN_LOCAL_CAM3              (3 << 1)
+#define GSC_IN_LOCAL_FIMD_WB           (2 << 1)
+#define GSC_IN_LOCAL_CAM1              (1 << 1)
+#define GSC_IN_LOCAL_CAM0              (0 << 1)
+#define GSC_IN_PATH_MASK               (1 << 0)
+#define GSC_IN_PATH_LOCAL              (1 << 0)
+#define GSC_IN_PATH_MEMORY             (0 << 0)
+
+/* G-Scaler source image size */
+#define GSC_SRCIMG_SIZE                        0x14
+#define GSC_SRCIMG_HEIGHT_MASK         (0x1fff << 16)
+#define GSC_SRCIMG_HEIGHT(x)           ((x) << 16)
+#define GSC_SRCIMG_WIDTH_MASK          (0x3fff << 0)
+#define GSC_SRCIMG_WIDTH(x)            ((x) << 0)
+
+/* G-Scaler source image offset */
+#define GSC_SRCIMG_OFFSET              0x18
+#define GSC_SRCIMG_OFFSET_Y_MASK       (0x1fff << 16)
+#define GSC_SRCIMG_OFFSET_Y(x)         ((x) << 16)
+#define GSC_SRCIMG_OFFSET_X_MASK       (0x1fff << 0)
+#define GSC_SRCIMG_OFFSET_X(x)         ((x) << 0)
+
+/* G-Scaler cropped source image size */
+#define GSC_CROPPED_SIZE               0x1C
+#define GSC_CROPPED_HEIGHT_MASK                (0x1fff << 16)
+#define GSC_CROPPED_HEIGHT(x)          ((x) << 16)
+#define GSC_CROPPED_WIDTH_MASK         (0x1fff << 0)
+#define GSC_CROPPED_WIDTH(x)           ((x) << 0)
+
+/* G-Scaler output control */
+#define GSC_OUT_CON                    0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK      (0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x)                ((x) << 24)
+#define GSC_OUT_CHROM_STRIDE_SEL_MASK  (1 << 13)
+#define GSC_OUT_CHROM_STRIDE_SEPAR     (1 << 13)
+#define GSC_OUT_RB_SWAP_MASK           (1 << 12)
+#define GSC_OUT_RB_SWAP                        (1 << 12)
+#define GSC_OUT_RGB_TYPE_MASK          (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW          (3 << 10)
+#define GSC_OUT_RGB_HD_WIDE            (2 << 10)
+#define GSC_OUT_RGB_SD_NARROW          (1 << 10)
+#define GSC_OUT_RGB_SD_WIDE            (0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK   (1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y  (0 << 9)
+#define GSC_OUT_YUV422_1P_OEDER_LSB_C  (1 << 9)
+#define GSC_OUT_CHROMA_ORDER_MASK      (1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR      (0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB      (1 << 8)
+#define GSC_OUT_FORMAT_MASK            (7 << 4)
+#define GSC_OUT_XRGB8888               (0 << 4)
+#define GSC_OUT_RGB565                 (1 << 4)
+#define GSC_OUT_YUV420_2P              (2 << 4)
+#define GSC_OUT_YUV420_3P              (3 << 4)
+#define GSC_OUT_YUV422_1P              (4 << 4)
+#define GSC_OUT_YUV422_2P              (5 << 4)
+#define GSC_OUT_YUV444                 (7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK         (1 << 2)
+#define GSC_OUT_TILE_C_16x8            (0 << 2)
+#define GSC_OUT_TILE_C_16x16           (1 << 2)
+#define GSC_OUT_TILE_MODE              (1 << 1)
+#define GSC_OUT_PATH_MASK              (1 << 0)
+#define GSC_OUT_PATH_LOCAL             (1 << 0)
+#define GSC_OUT_PATH_MEMORY            (0 << 0)
+
+/* G-Scaler scaled destination image size */
+#define GSC_SCALED_SIZE                        0x24
+#define GSC_SCALED_HEIGHT_MASK         (0x1fff << 16)
+#define GSC_SCALED_HEIGHT(x)           ((x) << 16)
+#define GSC_SCALED_WIDTH_MASK          (0x1fff << 0)
+#define GSC_SCALED_WIDTH(x)            ((x) << 0)
+
+/* G-Scaler pre scale ratio */
+#define GSC_PRE_SCALE_RATIO            0x28
+#define GSC_PRESC_SHFACTOR_MASK                (7 << 28)
+#define GSC_PRESC_SHFACTOR(x)          ((x) << 28)
+#define GSC_PRESC_V_RATIO_MASK         (7 << 16)
+#define GSC_PRESC_V_RATIO(x)           ((x) << 16)
+#define GSC_PRESC_H_RATIO_MASK         (7 << 0)
+#define GSC_PRESC_H_RATIO(x)           ((x) << 0)
+
+/* G-Scaler main scale horizontal ratio */
+#define GSC_MAIN_H_RATIO               0x2C
+#define GSC_MAIN_H_RATIO_MASK          (0xfffff << 0)
+#define GSC_MAIN_H_RATIO_VALUE(x)      ((x) << 0)
+
+/* G-Scaler main scale vertical ratio */
+#define GSC_MAIN_V_RATIO               0x30
+#define GSC_MAIN_V_RATIO_MASK          (0xfffff << 0)
+#define GSC_MAIN_V_RATIO_VALUE(x)      ((x) << 0)
+
+/* G-Scaler input chrominance stride */
+#define GSC_IN_CHROM_STRIDE            0x3C
+#define GSC_IN_CHROM_STRIDE_MASK       (0x3fff << 0)
+#define GSC_IN_CHROM_STRIDE_VALUE(x)   ((x) << 0)
+
+/* G-Scaler destination image size */
+#define GSC_DSTIMG_SIZE                        0x40
+#define GSC_DSTIMG_HEIGHT_MASK         (0x1fff << 16)
+#define GSC_DSTIMG_HEIGHT(x)           ((x) << 16)
+#define GSC_DSTIMG_WIDTH_MASK          (0x1fff << 0)
+#define GSC_DSTIMG_WIDTH(x)            ((x) << 0)
+
+/* G-Scaler destination image offset */
+#define GSC_DSTIMG_OFFSET              0x44
+#define GSC_DSTIMG_OFFSET_Y_MASK       (0x1fff << 16)
+#define GSC_DSTIMG_OFFSET_Y(x)         ((x) << 16)
+#define GSC_DSTIMG_OFFSET_X_MASK       (0x1fff << 0)
+#define GSC_DSTIMG_OFFSET_X(x)         ((x) << 0)
+
+/* G-Scaler output chrominance stride */
+#define GSC_OUT_CHROM_STRIDE           0x48
+#define GSC_OUT_CHROM_STRIDE_MASK      (0x3fff << 0)
+#define GSC_OUT_CHROM_STRIDE_VALUE(x)  ((x) << 0)
+
+/* G-Scaler input y address mask */
+#define GSC_IN_BASE_ADDR_Y_MASK                0x4C
+/* G-Scaler input y base address */
+#define GSC_IN_BASE_ADDR_Y(n)          (0x50 + (n) * 0x4)
+/* G-Scaler input y base current address */
+#define GSC_IN_BASE_ADDR_Y_CUR(n)      (0x60 + (n) * 0x4)
+
+/* G-Scaler input cb address mask */
+#define GSC_IN_BASE_ADDR_CB_MASK       0x7C
+/* G-Scaler input cb base address */
+#define GSC_IN_BASE_ADDR_CB(n)         (0x80 + (n) * 0x4)
+/* G-Scaler input cb base current address */
+#define GSC_IN_BASE_ADDR_CB_CUR(n)     (0x90 + (n) * 0x4)
+
+/* G-Scaler input cr address mask */
+#define GSC_IN_BASE_ADDR_CR_MASK       0xAC
+/* G-Scaler input cr base address */
+#define GSC_IN_BASE_ADDR_CR(n)         (0xB0 + (n) * 0x4)
+/* G-Scaler input cr base current address */
+#define GSC_IN_BASE_ADDR_CR_CUR(n)     (0xC0 + (n) * 0x4)
+
+/* G-Scaler input address mask */
+#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_IN_CURR_GET_INDEX(x)       ((x) >> 24)
+#define GSC_IN_BASE_ADDR_PINGPONG(x)   ((x) << 16)
+#define GSC_IN_BASE_ADDR_MASK          (0xff << 0)
+
+/* G-Scaler output y address mask */
+#define GSC_OUT_BASE_ADDR_Y_MASK       0x10C
+/* G-Scaler output y base address */
+#define GSC_OUT_BASE_ADDR_Y(n)         (0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask */
+#define GSC_OUT_BASE_ADDR_CB_MASK      0x15C
+/* G-Scaler output cb base address */
+#define GSC_OUT_BASE_ADDR_CB(n)                (0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask */
+#define GSC_OUT_BASE_ADDR_CR_MASK      0x1AC
+/* G-Scaler output cr base address */
+#define GSC_OUT_BASE_ADDR_CR(n)                (0x1B0 + (n) * 0x4)
+
+/* G-Scaler output address mask */
+#define GSC_OUT_CURR_ADDR_INDEX                (0xf << 24)
+#define GSC_OUT_CURR_GET_INDEX(x)      ((x) >> 24)
+#define GSC_OUT_BASE_ADDR_PINGPONG(x)  ((x) << 16)
+#define GSC_OUT_BASE_ADDR_MASK         (0xffff << 0)
+
+/* G-Scaler horizontal scaling filter */
+#define GSC_HCOEF(n, s, x)     (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler vertical scaling filter */
+#define GSC_VCOEF(n, s, x)     (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler BUS control */
+#define GSC_BUSCON                     0xA78
+#define GSC_BUSCON_INT_TIME_MASK       (1 << 8)
+#define GSC_BUSCON_INT_DATA_TRANS      (0 << 8)
+#define GSC_BUSCON_INT_AXI_RESPONSE    (1 << 8)
+#define GSC_BUSCON_AWCACHE(x)          ((x) << 4)
+#define GSC_BUSCON_ARCACHE(x)          ((x) << 0)
+
+/* G-Scaler V position */
+#define GSC_VPOSITION                  0xA7C
+#define GSC_VPOS_F(x)                  ((x) << 0)
+
+
+/* G-Scaler clock initial count */
+#define GSC_CLK_INIT_COUNT             0xC00
+#define GSC_CLK_GATE_MODE_INIT_CNT(x)  ((x) << 0)
+
+/* G-Scaler clock snoop count */
+#define GSC_CLK_SNOOP_COUNT            0xC04
+#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
+
+/* SYSCON. GSCBLK_CFG */
+#define SYSREG_GSCBLK_CFG1             (S3C_VA_SYS + 0x0224)
+#define GSC_BLK_DISP1WB_DEST(x)                ((x) << 10)
+#define GSC_BLK_SW_RESET_WB_DEST(x)    (1 << (18 + (x)))
+#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + (x)))
+#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x)  (1 << (2 * (x)))
+#define SYSREG_GSCBLK_CFG2             (S3C_VA_SYS + 0x2000)
+#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
+
+#endif /* EXYNOS_REGS_GSC_H_ */
index 9cc7c5e..ef1b3eb 100644 (file)
 #define HDMI_PHY_CMU                   HDMI_CTRL_BASE(0x007C)
 #define HDMI_CORE_RSTOUT               HDMI_CTRL_BASE(0x0080)
 
+/* PHY Control bit definition */
+
+/* HDMI_PHY_CON_0 */
+#define HDMI_PHY_POWER_OFF_EN          (1 << 0)
+
 /* Video related registers */
 #define HDMI_YMAX                      HDMI_CORE_BASE(0x0060)
 #define HDMI_YMIN                      HDMI_CORE_BASE(0x0064)
 #define HDMI_AVI_HEADER1               HDMI_CORE_BASE(0x0714)
 #define HDMI_AVI_HEADER2               HDMI_CORE_BASE(0x0718)
 #define HDMI_AVI_CHECK_SUM             HDMI_CORE_BASE(0x071C)
-#define HDMI_AVI_BYTE(n)               HDMI_CORE_BASE(0x0720 + 4 * (n))
+#define HDMI_AVI_BYTE(n)               HDMI_CORE_BASE(0x0720 + 4 * ((n) - 1))
 
 #define HDMI_AUI_CON                   HDMI_CORE_BASE(0x0800)
 #define HDMI_AUI_HEADER0               HDMI_CORE_BASE(0x0810)
 #define HDMI_AUI_HEADER1               HDMI_CORE_BASE(0x0814)
 #define HDMI_AUI_HEADER2               HDMI_CORE_BASE(0x0818)
 #define HDMI_AUI_CHECK_SUM             HDMI_CORE_BASE(0x081C)
-#define HDMI_AUI_BYTE(n)               HDMI_CORE_BASE(0x0820 + 4 * (n))
+#define HDMI_AUI_BYTE(n)               HDMI_CORE_BASE(0x0820 + 4 * ((n) - 1))
 
 #define HDMI_MPG_CON                   HDMI_CORE_BASE(0x0900)
 #define HDMI_MPG_CHECK_SUM             HDMI_CORE_BASE(0x091C)
 #define HDMI_AN_SEED_2                 HDMI_CORE_BASE(0x0E60)
 #define HDMI_AN_SEED_3                 HDMI_CORE_BASE(0x0E64)
 
+/* AVI bit definition */
+#define HDMI_AVI_CON_DO_NOT_TRANSMIT   (0 << 1)
+#define HDMI_AVI_CON_EVERY_VSYNC       (1 << 1)
+
+#define AVI_ACTIVE_FORMAT_VALID        (1 << 4)
+#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
+
+/* AUI bit definition */
+#define HDMI_AUI_CON_NO_TRAN           (0 << 0)
+
+/* VSI bit definition */
+#define HDMI_VSI_CON_DO_NOT_TRANSMIT   (0 << 0)
+
 /* HDCP related registers */
 #define HDMI_HDCP_SHA1(n)              HDMI_CORE_BASE(0x7000 + 4 * (n))
 #define HDMI_HDCP_KSV_LIST(n)          HDMI_CORE_BASE(0x7050 + 4 * (n))
diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644 (file)
index 0000000..a09ac6e
--- /dev/null
@@ -0,0 +1,73 @@
+/* drivers/gpu/drm/exynos/regs-rotator.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * Register definition file for Samsung Rotator Interface (Rotator) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_ROTATOR_H
+#define EXYNOS_REGS_ROTATOR_H
+
+/* Configuration */
+#define ROT_CONFIG                     0x00
+#define ROT_CONFIG_IRQ                 (3 << 8)
+
+/* Image Control */
+#define ROT_CONTROL                    0x10
+#define ROT_CONTROL_PATTERN_WRITE      (1 << 16)
+#define ROT_CONTROL_FMT_YCBCR420_2P    (1 << 8)
+#define ROT_CONTROL_FMT_RGB888         (6 << 8)
+#define ROT_CONTROL_FMT_MASK           (7 << 8)
+#define ROT_CONTROL_FLIP_VERTICAL      (2 << 6)
+#define ROT_CONTROL_FLIP_HORIZONTAL    (3 << 6)
+#define ROT_CONTROL_FLIP_MASK          (3 << 6)
+#define ROT_CONTROL_ROT_90             (1 << 4)
+#define ROT_CONTROL_ROT_180            (2 << 4)
+#define ROT_CONTROL_ROT_270            (3 << 4)
+#define ROT_CONTROL_ROT_MASK           (3 << 4)
+#define ROT_CONTROL_START              (1 << 0)
+
+/* Status */
+#define ROT_STATUS                     0x20
+#define ROT_STATUS_IRQ_PENDING(x)      (1 << (x))
+#define ROT_STATUS_IRQ(x)              (((x) >> 8) & 0x3)
+#define ROT_STATUS_IRQ_VAL_COMPLETE    1
+#define ROT_STATUS_IRQ_VAL_ILLEGAL     2
+
+/* Buffer Address */
+#define ROT_SRC_BUF_ADDR(n)            (0x30 + ((n) << 2))
+#define ROT_DST_BUF_ADDR(n)            (0x50 + ((n) << 2))
+
+/* Buffer Size */
+#define ROT_SRC_BUF_SIZE               0x3c
+#define ROT_DST_BUF_SIZE               0x5c
+#define ROT_SET_BUF_SIZE_H(x)          ((x) << 16)
+#define ROT_SET_BUF_SIZE_W(x)          ((x) << 0)
+#define ROT_GET_BUF_SIZE_H(x)          ((x) >> 16)
+#define ROT_GET_BUF_SIZE_W(x)          ((x) & 0xffff)
+
+/* Crop Position */
+#define ROT_SRC_CROP_POS               0x40
+#define ROT_DST_CROP_POS               0x60
+#define ROT_CROP_POS_Y(x)              ((x) << 16)
+#define ROT_CROP_POS_X(x)              ((x) << 0)
+
+/* Source Crop Size */
+#define ROT_SRC_CROP_SIZE              0x44
+#define ROT_SRC_CROP_SIZE_H(x)         ((x) << 16)
+#define ROT_SRC_CROP_SIZE_W(x)         ((x) << 0)
+
+/* Round to nearest aligned value */
+#define ROT_ALIGN(x, align, mask)      (((x) + (1 << ((align) - 1))) & (mask))
+/* Minimum limit value */
+#define ROT_MIN(min, mask)             (((min) + ~(mask)) & (mask))
+/* Maximum limit value */
+#define ROT_MAX(max, mask)             ((max) & (mask))
+
+#endif /* EXYNOS_REGS_ROTATOR_H */
+
index 1ceca3d..23e14e9 100644 (file)
@@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
 
                dev_priv->force_audio_property = prop;
        }
-       drm_connector_attach_property(connector, prop, 0);
+       drm_object_attach_property(&connector->base, prop, 0);
 }
 
 
@@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
                dev_priv->broadcast_rgb_property = prop;
        }
 
-       drm_connector_attach_property(connector, prop, 0);
+       drm_object_attach_property(&connector->base, prop, 0);
 }
 
 /* Cedarview */
index e3a3978..51044cc 100644 (file)
@@ -1650,7 +1650,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
        struct cdv_intel_dp *intel_dp = encoder->dev_priv;
        int ret;
 
-       ret = drm_connector_property_set_value(connector, property, val);
+       ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret)
                return ret;
 
index 7272a46..e223b50 100644 (file)
@@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
                        return -1;
                }
 
-               if (drm_connector_property_get_value(connector,
+               if (drm_object_property_get_value(&connector->base,
                                                        property, &curValue))
                        return -1;
 
                if (curValue == value)
                        return 0;
 
-               if (drm_connector_property_set_value(connector,
+               if (drm_object_property_set_value(&connector->base,
                                                        property, value))
                        return -1;
 
@@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev,
        connector->interlace_allowed = false;
        connector->doublescan_allowed = false;
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.scaling_mode_property,
                                      DRM_MODE_SCALE_FULLSCREEN);
 
index b362dd3..d81dbc3 100644 (file)
@@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
                        return -1;
                }
 
-               if (drm_connector_property_get_value(connector,
+               if (drm_object_property_get_value(&connector->base,
                                                     property,
                                                     &curValue))
                        return -1;
@@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
                if (curValue == value)
                        return 0;
 
-               if (drm_connector_property_set_value(connector,
+               if (drm_object_property_set_value(&connector->base,
                                                        property,
                                                        value))
                        return -1;
@@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
                                return -1;
                }
        } else if (!strcmp(property->name, "backlight") && encoder) {
-               if (drm_connector_property_set_value(connector,
+               if (drm_object_property_set_value(&connector->base,
                                                        property,
                                                        value))
                        return -1;
@@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
        connector->doublescan_allowed = false;
 
        /*Attach connector properties*/
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.scaling_mode_property,
                                      DRM_MODE_SCALE_FULLSCREEN);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev_priv->backlight_property,
                                      BRIGHTNESS_MAX_LEVEL);
 
index 637dd84..2d4ab48 100644 (file)
@@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
                        goto set_prop_error;
                }
 
-               if (drm_connector_property_get_value(connector, property, &val))
+               if (drm_object_property_get_value(&connector->base, property, &val))
                        goto set_prop_error;
 
                if (val == value)
                        goto set_prop_done;
 
-               if (drm_connector_property_set_value(connector,
+               if (drm_object_property_set_value(&connector->base,
                                                        property, value))
                        goto set_prop_error;
 
@@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
                        }
                }
        } else if (!strcmp(property->name, "backlight") && encoder) {
-               if (drm_connector_property_set_value(connector, property,
+               if (drm_object_property_set_value(&connector->base, property,
                                                                        value))
                        goto set_prop_error;
                else
@@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev,
        connector->doublescan_allowed = false;
 
        /*attach properties*/
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                dev->mode_config.scaling_mode_property,
                                DRM_MODE_SCALE_FULLSCREEN);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                dev_priv->backlight_property,
                                MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
 
index dec6a9a..74485dc 100644 (file)
@@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
        REG_WRITE(map->pos, 0);
 
        if (psb_intel_encoder)
-               drm_connector_property_get_value(connector,
+               drm_object_property_get_value(&connector->base,
                        dev->mode_config.scaling_mode_property, &scalingType);
 
        if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
index 4ec2962..3071526 100644 (file)
@@ -351,7 +351,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
                  (mode->crtc_vdisplay - 1));
 
        if (psb_intel_encoder)
-               drm_connector_property_get_value(connector,
+               drm_object_property_get_value(&connector->base,
                        dev->mode_config.scaling_mode_property, &scalingType);
 
        if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
index 558c77f..325013a 100644 (file)
@@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
                return;
        }
 
-       drm_connector_property_get_value(
-               connector,
+       drm_object_property_get_value(
+               &connector->base,
                dev->mode_config.scaling_mode_property,
                &v);
 
@@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev,
        connector->interlace_allowed = false;
        connector->doublescan_allowed = false;
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        dev->mode_config.scaling_mode_property,
                                        DRM_MODE_SCALE_FULLSCREEN);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        dev_priv->backlight_property,
                                        BRIGHTNESS_MAX_LEVEL);
 
index 2a4c3a9..9fa5fa2 100644 (file)
@@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
                        goto set_prop_error;
                }
 
-               if (drm_connector_property_get_value(connector,
+               if (drm_object_property_get_value(&connector->base,
                                                     property,
                                                     &curval))
                        goto set_prop_error;
@@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
                if (curval == value)
                        goto set_prop_done;
 
-               if (drm_connector_property_set_value(connector,
+               if (drm_object_property_set_value(&connector->base,
                                                        property,
                                                        value))
                        goto set_prop_error;
@@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
                                goto set_prop_error;
                }
        } else if (!strcmp(property->name, "backlight")) {
-               if (drm_connector_property_set_value(connector,
+               if (drm_object_property_set_value(&connector->base,
                                                        property,
                                                        value))
                        goto set_prop_error;
@@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
        connector->doublescan_allowed = false;
 
        /*Attach connector properties*/
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.scaling_mode_property,
                                      DRM_MODE_SCALE_FULLSCREEN);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev_priv->backlight_property,
                                      BRIGHTNESS_MAX_LEVEL);
 
index fc92927..a4cc777 100644 (file)
@@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
        uint8_t cmd;
        int ret;
 
-       ret = drm_connector_property_set_value(connector, property, val);
+       ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret)
                return ret;
 
@@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
        } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
                temp_value = val;
                if (psb_intel_sdvo_connector->left == property) {
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         psb_intel_sdvo_connector->right, val);
                        if (psb_intel_sdvo_connector->left_margin == temp_value)
                                return 0;
@@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
                        cmd = SDVO_CMD_SET_OVERSCAN_H;
                        goto set_value;
                } else if (psb_intel_sdvo_connector->right == property) {
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         psb_intel_sdvo_connector->left, val);
                        if (psb_intel_sdvo_connector->right_margin == temp_value)
                                return 0;
@@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
                        cmd = SDVO_CMD_SET_OVERSCAN_H;
                        goto set_value;
                } else if (psb_intel_sdvo_connector->top == property) {
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         psb_intel_sdvo_connector->bottom, val);
                        if (psb_intel_sdvo_connector->top_margin == temp_value)
                                return 0;
@@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
                        cmd = SDVO_CMD_SET_OVERSCAN_V;
                        goto set_value;
                } else if (psb_intel_sdvo_connector->bottom == property) {
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         psb_intel_sdvo_connector->top, val);
                        if (psb_intel_sdvo_connector->bottom_margin == temp_value)
                                return 0;
@@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
                                i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
 
        psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
-       drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+       drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
                                      psb_intel_sdvo_connector->tv_format, 0);
        return true;
 
@@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
                psb_intel_sdvo_connector->name = \
                        drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
                if (!psb_intel_sdvo_connector->name) return false; \
-               drm_connector_attach_property(connector, \
+               drm_object_attach_property(&connector->base, \
                                              psb_intel_sdvo_connector->name, \
                                              psb_intel_sdvo_connector->cur_##name); \
                DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
                if (!psb_intel_sdvo_connector->left)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              psb_intel_sdvo_connector->left,
                                              psb_intel_sdvo_connector->left_margin);
 
@@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
                if (!psb_intel_sdvo_connector->right)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              psb_intel_sdvo_connector->right,
                                              psb_intel_sdvo_connector->right_margin);
                DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
                if (!psb_intel_sdvo_connector->top)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              psb_intel_sdvo_connector->top,
                                              psb_intel_sdvo_connector->top_margin);
 
@@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
                if (!psb_intel_sdvo_connector->bottom)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              psb_intel_sdvo_connector->bottom,
                                              psb_intel_sdvo_connector->bottom_margin);
                DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
                if (!psb_intel_sdvo_connector->dot_crawl)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              psb_intel_sdvo_connector->dot_crawl,
                                              psb_intel_sdvo_connector->cur_dot_crawl);
                DRM_DEBUG_KMS("dot crawl: current %d\n", response);
index 599099f..b865d07 100644 (file)
@@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod
        else
                priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
 
-       drm_connector_property_set_value(connector,
+       drm_object_property_set_value(&connector->base,
                        encoder->dev->mode_config.tv_subconnector_property,
                                                        priv->subconnector);
 
@@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
 
        priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
 
-       drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+       drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
                                      priv->select_subconnector);
-       drm_connector_attach_property(connector, conf->tv_subconnector_property,
+       drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
                                      priv->subconnector);
-       drm_connector_attach_property(connector, conf->tv_left_margin_property,
+       drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
                                      priv->hmargin);
-       drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
+       drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
                                      priv->vmargin);
-       drm_connector_attach_property(connector, conf->tv_mode_property,
+       drm_object_attach_property(&connector->base, conf->tv_mode_property,
                                      priv->norm);
-       drm_connector_attach_property(connector, conf->tv_brightness_property,
+       drm_object_attach_property(&connector->base, conf->tv_brightness_property,
                                      priv->brightness);
-       drm_connector_attach_property(connector, conf->tv_contrast_property,
+       drm_object_attach_property(&connector->base, conf->tv_contrast_property,
                                      priv->contrast);
-       drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
+       drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
                                      priv->flicker);
-       drm_connector_attach_property(connector, priv->scale_property,
+       drm_object_attach_property(&connector->base, priv->scale_property,
                                      priv->scale);
 
        return 0;
index 87e9b92..55ffba1 100644 (file)
@@ -499,12 +499,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
        edp = find_section(bdb, BDB_EDP);
        if (!edp) {
-               if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
-                       DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
-                                     "supported, assume %dbpp panel color "
-                                     "depth.\n",
-                                     dev_priv->edp.bpp);
-               }
+               if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
+                       DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
                return;
        }
 
@@ -657,9 +653,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
        dev_priv->lvds_use_ssc = 1;
        dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
        DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
-
-       /* eDP data */
-       dev_priv->edp.bpp = 18;
 }
 
 static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
index fe20bf7..9293878 100644 (file)
@@ -143,7 +143,7 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
        int old_dpms;
 
        /* PCH platforms and VLV only support on/off. */
-       if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON)
+       if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
                mode = DRM_MODE_DPMS_OFF;
 
        if (mode == connector->dpms)
index 82267b2..5d127e0 100644 (file)
@@ -4118,6 +4118,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                        }
                }
 
+               if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+                       /* Use VBT settings if we have an eDP panel */
+                       unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+
+                       if (edp_bpc && edp_bpc < display_bpc) {
+                               DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+                               display_bpc = edp_bpc;
+                       }
+                       continue;
+               }
+
                /*
                 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
                 * through, clamp it down.  (Note: >12bpc will be caught below.)
index d76258d..1b63d55 100644 (file)
@@ -2569,8 +2569,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
 
        if (is_edp(intel_dp)) {
                drm_mode_create_scaling_mode_property(connector->dev);
-               drm_connector_attach_property(
-                       connector,
+               drm_object_attach_property(
+                       &connector->base,
                        connector->dev->mode_config.scaling_mode_property,
                        DRM_MODE_SCALE_ASPECT);
                intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
index 42839fc..496caa7 100644 (file)
@@ -2379,15 +2379,9 @@ int intel_enable_rc6(const struct drm_device *dev)
        if (i915_enable_rc6 >= 0)
                return i915_enable_rc6;
 
-       if (INTEL_INFO(dev)->gen == 5) {
-#ifdef CONFIG_INTEL_IOMMU
-               /* Disable rc6 on ilk if VT-d is on. */
-               if (intel_iommu_gfx_mapped)
-                       return false;
-#endif
-               DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
-               return INTEL_RC6_ENABLE;
-       }
+       /* Disable RC6 on Ironlake */
+       if (INTEL_INFO(dev)->gen == 5)
+               return 0;
 
        if (IS_HASWELL(dev)) {
                DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
index 0e03985..c275bf0 100644 (file)
@@ -2251,7 +2251,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
                connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
                intel_sdvo->is_hdmi = true;
        }
-       intel_sdvo->base.cloneable = true;
 
        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
        if (intel_sdvo->is_hdmi)
@@ -2282,7 +2281,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 
        intel_sdvo->is_tv = true;
        intel_sdvo->base.needs_tv_clock = true;
-       intel_sdvo->base.cloneable = false;
 
        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
@@ -2325,8 +2323,6 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
        }
 
-       intel_sdvo->base.cloneable = true;
-
        intel_sdvo_connector_init(intel_sdvo_connector,
                                  intel_sdvo);
        return true;
@@ -2357,9 +2353,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
        }
 
-       /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
-       intel_sdvo->base.cloneable = false;
-
        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
                goto err;
@@ -2432,6 +2425,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
        return true;
 }
 
+static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+{
+       struct drm_device *dev = intel_sdvo->base.base.dev;
+       struct drm_connector *connector, *tmp;
+
+       list_for_each_entry_safe(connector, tmp,
+                                &dev->mode_config.connector_list, head) {
+               if (intel_attached_encoder(connector) == &intel_sdvo->base)
+                       intel_sdvo_destroy(connector);
+       }
+}
+
 static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
                                          struct intel_sdvo_connector *intel_sdvo_connector,
                                          int type)
@@ -2753,9 +2758,20 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
                                    intel_sdvo->caps.output_flags) != true) {
                DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
                              SDVO_NAME(intel_sdvo));
-               goto err;
+               /* Output_setup can leave behind connectors! */
+               goto err_output;
        }
 
+       /*
+        * Cloning SDVO with anything is often impossible, since the SDVO
+        * encoder can request a special input timing mode. And even if that's
+        * not the case we have evidence that cloning a plain unscaled mode with
+        * VGA doesn't really work. Furthermore the cloning flags are way too
+        * simplistic anyway to express such constraints, so just give up on
+        * cloning for SDVO encoders.
+        */
+       intel_sdvo->base.cloneable = false;
+
        /* Only enable the hotplug irq if we need it, to work around noisy
         * hotplug lines.
         */
@@ -2766,12 +2782,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 
        /* Set the input timing to the screen. Assume always input 0. */
        if (!intel_sdvo_set_target_input(intel_sdvo))
-               goto err;
+               goto err_output;
 
        if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
                                                    &intel_sdvo->pixel_clock_min,
                                                    &intel_sdvo->pixel_clock_max))
-               goto err;
+               goto err_output;
 
        DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
                        "clock range %dMHz - %dMHz, "
@@ -2791,6 +2807,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
                        (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
        return true;
 
+err_output:
+       intel_sdvo_output_cleanup(intel_sdvo);
+
 err:
        drm_encoder_cleanup(&intel_encoder->base);
        i2c_del_adapter(&intel_sdvo->ddc);
index 49d60a6..8fc9d92 100644 (file)
@@ -186,11 +186,11 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
 
 static int mgag200_bo_move(struct ttm_buffer_object *bo,
                       bool evict, bool interruptible,
-                      bool no_wait_reserve, bool no_wait_gpu,
+                      bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
        int r;
-       r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+       r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
        return r;
 }
 
@@ -382,7 +382,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
        mgag200_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;
 
@@ -405,7 +405,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
 
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;
 
@@ -430,7 +430,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
 
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret) {
                DRM_ERROR("pushing to VRAM failed\n");
                return ret;
index a990df4..ab25752 100644 (file)
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
 nouveau-y += core/core/engctx.o
 nouveau-y += core/core/engine.o
 nouveau-y += core/core/enum.o
+nouveau-y += core/core/falcon.o
 nouveau-y += core/core/gpuobj.o
 nouveau-y += core/core/handle.o
 nouveau-y += core/core/mm.o
@@ -29,6 +30,7 @@ nouveau-y += core/subdev/bios/base.o
 nouveau-y += core/subdev/bios/bit.o
 nouveau-y += core/subdev/bios/conn.o
 nouveau-y += core/subdev/bios/dcb.o
+nouveau-y += core/subdev/bios/disp.o
 nouveau-y += core/subdev/bios/dp.o
 nouveau-y += core/subdev/bios/extdev.o
 nouveau-y += core/subdev/bios/gpio.o
@@ -64,9 +66,19 @@ nouveau-y += core/subdev/devinit/nv50.o
 nouveau-y += core/subdev/fb/base.o
 nouveau-y += core/subdev/fb/nv04.o
 nouveau-y += core/subdev/fb/nv10.o
+nouveau-y += core/subdev/fb/nv1a.o
 nouveau-y += core/subdev/fb/nv20.o
+nouveau-y += core/subdev/fb/nv25.o
 nouveau-y += core/subdev/fb/nv30.o
+nouveau-y += core/subdev/fb/nv35.o
+nouveau-y += core/subdev/fb/nv36.o
 nouveau-y += core/subdev/fb/nv40.o
+nouveau-y += core/subdev/fb/nv41.o
+nouveau-y += core/subdev/fb/nv44.o
+nouveau-y += core/subdev/fb/nv46.o
+nouveau-y += core/subdev/fb/nv47.o
+nouveau-y += core/subdev/fb/nv49.o
+nouveau-y += core/subdev/fb/nv4e.o
 nouveau-y += core/subdev/fb/nv50.o
 nouveau-y += core/subdev/fb/nvc0.o
 nouveau-y += core/subdev/gpio/base.o
@@ -111,7 +123,10 @@ nouveau-y += core/engine/dmaobj/base.o
 nouveau-y += core/engine/dmaobj/nv04.o
 nouveau-y += core/engine/dmaobj/nv50.o
 nouveau-y += core/engine/dmaobj/nvc0.o
+nouveau-y += core/engine/dmaobj/nvd0.o
 nouveau-y += core/engine/bsp/nv84.o
+nouveau-y += core/engine/bsp/nvc0.o
+nouveau-y += core/engine/bsp/nve0.o
 nouveau-y += core/engine/copy/nva3.o
 nouveau-y += core/engine/copy/nvc0.o
 nouveau-y += core/engine/copy/nve0.o
@@ -119,7 +134,21 @@ nouveau-y += core/engine/crypt/nv84.o
 nouveau-y += core/engine/crypt/nv98.o
 nouveau-y += core/engine/disp/nv04.o
 nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nv84.o
+nouveau-y += core/engine/disp/nv94.o
+nouveau-y += core/engine/disp/nva0.o
+nouveau-y += core/engine/disp/nva3.o
 nouveau-y += core/engine/disp/nvd0.o
+nouveau-y += core/engine/disp/nve0.o
+nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/hdanva3.o
+nouveau-y += core/engine/disp/hdanvd0.o
+nouveau-y += core/engine/disp/hdminv84.o
+nouveau-y += core/engine/disp/hdminva3.o
+nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/sornv50.o
+nouveau-y += core/engine/disp/sornv94.o
+nouveau-y += core/engine/disp/sornvd0.o
 nouveau-y += core/engine/disp/vga.o
 nouveau-y += core/engine/fifo/base.o
 nouveau-y += core/engine/fifo/nv04.o
@@ -151,11 +180,14 @@ nouveau-y += core/engine/mpeg/nv40.o
 nouveau-y += core/engine/mpeg/nv50.o
 nouveau-y += core/engine/mpeg/nv84.o
 nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/ppp/nvc0.o
 nouveau-y += core/engine/software/nv04.o
 nouveau-y += core/engine/software/nv10.o
 nouveau-y += core/engine/software/nv50.o
 nouveau-y += core/engine/software/nvc0.o
 nouveau-y += core/engine/vp/nv84.o
+nouveau-y += core/engine/vp/nvc0.o
+nouveau-y += core/engine/vp/nve0.o
 
 # drm/core
 nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
@@ -166,7 +198,7 @@ nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
 
 # drm/kms
 nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
-nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o
+nouveau-y += nouveau_connector.o nouveau_dp.o
 nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
 
 # drm/kms/nv04:nv50
@@ -175,9 +207,7 @@ nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
 nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
 
 # drm/kms/nv50-
-nouveau-y += nv50_display.o nvd0_display.o
-nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
-nouveau-y += nv50_evo.o
+nouveau-y += nv50_display.o
 
 # drm/pm
 nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
index e41b10d..84c71fa 100644 (file)
@@ -189,6 +189,21 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
        return nouveau_gpuobj_fini(&engctx->base, suspend);
 }
 
+int
+_nouveau_engctx_ctor(struct nouveau_object *parent,
+                    struct nouveau_object *engine,
+                    struct nouveau_oclass *oclass, void *data, u32 size,
+                    struct nouveau_object **pobject)
+{
+       struct nouveau_engctx *engctx;
+       int ret;
+
+       ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
+                                   NVOBJ_FLAG_ZERO_ALLOC, &engctx);
+       *pobject = nv_object(engctx);
+       return ret;
+}
+
 void
 _nouveau_engctx_dtor(struct nouveau_object *object)
 {
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c
new file mode 100644 (file)
index 0000000..6b0843c
--- /dev/null
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/falcon.h>
+
+#include <subdev/timer.h>
+
+u32
+_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
+{
+       struct nouveau_falcon *falcon = (void *)object;
+       return nv_rd32(falcon, falcon->addr + addr);
+}
+
+void
+_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+       struct nouveau_falcon *falcon = (void *)object;
+       nv_wr32(falcon, falcon->addr + addr, data);
+}
+
+int
+_nouveau_falcon_init(struct nouveau_object *object)
+{
+       struct nouveau_device *device = nv_device(object);
+       struct nouveau_falcon *falcon = (void *)object;
+       const struct firmware *fw;
+       char name[32] = "internal";
+       int ret, i;
+       u32 caps;
+
+       /* enable engine, and determine its capabilities */
+       ret = nouveau_engine_init(&falcon->base);
+       if (ret)
+               return ret;
+
+       if (device->chipset <  0xa3 ||
+           device->chipset == 0xaa || device->chipset == 0xac) {
+               falcon->version = 0;
+               falcon->secret  = (falcon->addr == 0x087000) ? 1 : 0;
+       } else {
+               caps = nv_ro32(falcon, 0x12c);
+               falcon->version = (caps & 0x0000000f);
+               falcon->secret  = (caps & 0x00000030) >> 4;
+       }
+
+       caps = nv_ro32(falcon, 0x108);
+       falcon->code.limit = (caps & 0x000001ff) << 8;
+       falcon->data.limit = (caps & 0x0003fe00) >> 1;
+
+       nv_debug(falcon, "falcon version: %d\n", falcon->version);
+       nv_debug(falcon, "secret level: %d\n", falcon->secret);
+       nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
+       nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
+
+       /* wait for 'uc halted' to be signalled before continuing */
+       if (falcon->secret) {
+               nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+               nv_wo32(falcon, 0x004, 0x00000010);
+       }
+
+       /* disable all interrupts */
+       nv_wo32(falcon, 0x014, 0xffffffff);
+
+       /* no default ucode provided by the engine implementation, try and
+        * locate a "self-bootstrapping" firmware image for the engine
+        */
+       if (!falcon->code.data) {
+               snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
+                        device->chipset, falcon->addr >> 12);
+
+               ret = request_firmware(&fw, name, &device->pdev->dev);
+               if (ret == 0) {
+                       falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+                       falcon->code.size = fw->size;
+                       falcon->data.data = NULL;
+                       falcon->data.size = 0;
+                       release_firmware(fw);
+               }
+
+               falcon->external = true;
+       }
+
+       /* next step is to try and load "static code/data segment" firmware
+        * images for the engine
+        */
+       if (!falcon->code.data) {
+               snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
+                        device->chipset, falcon->addr >> 12);
+
+               ret = request_firmware(&fw, name, &device->pdev->dev);
+               if (ret) {
+                       nv_error(falcon, "unable to load firmware data\n");
+                       return ret;
+               }
+
+               falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+               falcon->data.size = fw->size;
+               release_firmware(fw);
+               if (!falcon->data.data)
+                       return -ENOMEM;
+
+               snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
+                        device->chipset, falcon->addr >> 12);
+
+               ret = request_firmware(&fw, name, &device->pdev->dev);
+               if (ret) {
+                       nv_error(falcon, "unable to load firmware code\n");
+                       return ret;
+               }
+
+               falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+               falcon->code.size = fw->size;
+               release_firmware(fw);
+               if (!falcon->code.data)
+                       return -ENOMEM;
+       }
+
+       nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
+                "static code/data segments" : "self-bootstrapping");
+
+       /* ensure any "self-bootstrapping" firmware image is in vram */
+       if (!falcon->data.data && !falcon->core) {
+               ret = nouveau_gpuobj_new(object->parent, NULL,
+                                        falcon->code.size, 256, 0,
+                                       &falcon->core);
+               if (ret) {
+                       nv_error(falcon, "core allocation failed, %d\n", ret);
+                       return ret;
+               }
+
+               for (i = 0; i < falcon->code.size; i += 4)
+                       nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
+       }
+
+       /* upload firmware bootloader (or the full code segments) */
+       if (falcon->core) {
+               if (device->card_type < NV_C0)
+                       nv_wo32(falcon, 0x618, 0x04000000);
+               else
+                       nv_wo32(falcon, 0x618, 0x00000114);
+               nv_wo32(falcon, 0x11c, 0);
+               nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
+               nv_wo32(falcon, 0x114, 0);
+               nv_wo32(falcon, 0x118, 0x00006610);
+       } else {
+               if (falcon->code.size > falcon->code.limit ||
+                   falcon->data.size > falcon->data.limit) {
+                       nv_error(falcon, "ucode exceeds falcon limit(s)\n");
+                       return -EINVAL;
+               }
+
+               if (falcon->version < 3) {
+                       nv_wo32(falcon, 0xff8, 0x00100000);
+                       for (i = 0; i < falcon->code.size / 4; i++)
+                               nv_wo32(falcon, 0xff4, falcon->code.data[i]);
+               } else {
+                       nv_wo32(falcon, 0x180, 0x01000000);
+                       for (i = 0; i < falcon->code.size / 4; i++) {
+                               if ((i & 0x3f) == 0)
+                                       nv_wo32(falcon, 0x188, i >> 6);
+                               nv_wo32(falcon, 0x184, falcon->code.data[i]);
+                       }
+               }
+       }
+
+       /* upload data segment (if necessary), zeroing the remainder */
+       if (falcon->version < 3) {
+               nv_wo32(falcon, 0xff8, 0x00000000);
+               for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+                       nv_wo32(falcon, 0xff4, falcon->data.data[i]);
+               for (; i < falcon->data.limit; i += 4)
+                       nv_wo32(falcon, 0xff4, 0x00000000);
+       } else {
+               nv_wo32(falcon, 0x1c0, 0x01000000);
+               for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+                       nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
+               for (; i < falcon->data.limit / 4; i++)
+                       nv_wo32(falcon, 0x1c4, 0x00000000);
+       }
+
+       /* start it running */
+       nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
+       nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
+       nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
+       nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
+       return 0;
+}
+
+int
+_nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nouveau_falcon *falcon = (void *)object;
+
+       if (!suspend) {
+               nouveau_gpuobj_ref(NULL, &falcon->core);
+               if (falcon->external) {
+                       kfree(falcon->data.data);
+                       kfree(falcon->code.data);
+                       falcon->code.data = NULL;
+               }
+       }
+
+       nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
+       nv_wo32(falcon, 0x014, 0xffffffff);
+
+       return nouveau_engine_fini(&falcon->base, suspend);
+}
+
+int
+nouveau_falcon_create_(struct nouveau_object *parent,
+                      struct nouveau_object *engine,
+                      struct nouveau_oclass *oclass, u32 addr, bool enable,
+                      const char *iname, const char *fname,
+                      int length, void **pobject)
+{
+       struct nouveau_falcon *falcon;
+       int ret;
+
+       ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
+                                    fname, length, pobject);
+       falcon = *pobject;
+       if (ret)
+               return ret;
+
+       falcon->addr = addr;
+       return 0;
+}
index 70586fd..560b221 100644 (file)
@@ -183,7 +183,7 @@ _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
 }
 
 u32
-_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
 {
        struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
        struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
@@ -193,7 +193,7 @@ _nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
 }
 
 void
-_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
        struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
index a6d3cd6..0261a11 100644 (file)
@@ -234,15 +234,18 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 int
 nouveau_mm_fini(struct nouveau_mm *mm)
 {
-       struct nouveau_mm_node *node, *heap =
-               list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
-       int nodes = 0;
+       if (nouveau_mm_initialised(mm)) {
+               struct nouveau_mm_node *node, *heap =
+                       list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
+               int nodes = 0;
+
+               list_for_each_entry(node, &mm->nodes, nl_entry) {
+                       if (WARN_ON(nodes++ == mm->heap_nodes))
+                               return -EBUSY;
+               }
 
-       list_for_each_entry(node, &mm->nodes, nl_entry) {
-               if (WARN_ON(nodes++ == mm->heap_nodes))
-                       return -EBUSY;
+               kfree(heap);
        }
 
-       kfree(heap);
        return 0;
 }
index 66f7dfd..1d9f614 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/class.h>
 #include <core/engctx.h>
+#include <core/class.h>
 
 #include <engine/bsp.h>
 
 struct nv84_bsp_priv {
-       struct nouveau_bsp base;
-};
-
-struct nv84_bsp_chan {
-       struct nouveau_bsp_chan base;
+       struct nouveau_engine base;
 };
 
 /*******************************************************************************
@@ -49,61 +44,16 @@ nv84_bsp_sclass[] = {
  * BSP context
  ******************************************************************************/
 
-static int
-nv84_bsp_context_ctor(struct nouveau_object *parent,
-                     struct nouveau_object *engine,
-                     struct nouveau_oclass *oclass, void *data, u32 size,
-                     struct nouveau_object **pobject)
-{
-       struct nv84_bsp_chan *priv;
-       int ret;
-
-       ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
-                                        0, 0, 0, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static void
-nv84_bsp_context_dtor(struct nouveau_object *object)
-{
-       struct nv84_bsp_chan *priv = (void *)object;
-       nouveau_bsp_context_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_context_init(struct nouveau_object *object)
-{
-       struct nv84_bsp_chan *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_bsp_context_init(&priv->base);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv84_bsp_chan *priv = (void *)object;
-       return nouveau_bsp_context_fini(&priv->base, suspend);
-}
-
 static struct nouveau_oclass
 nv84_bsp_cclass = {
        .handle = NV_ENGCTX(BSP, 0x84),
        .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv84_bsp_context_ctor,
-               .dtor = nv84_bsp_context_dtor,
-               .init = nv84_bsp_context_init,
-               .fini = nv84_bsp_context_fini,
-               .rd32 = _nouveau_bsp_context_rd32,
-               .wr32 = _nouveau_bsp_context_wr32,
+               .ctor = _nouveau_engctx_ctor,
+               .dtor = _nouveau_engctx_dtor,
+               .init = _nouveau_engctx_init,
+               .fini = _nouveau_engctx_fini,
+               .rd32 = _nouveau_engctx_rd32,
+               .wr32 = _nouveau_engctx_wr32,
        },
 };
 
@@ -111,11 +61,6 @@ nv84_bsp_cclass = {
  * BSP engine/subdev functions
  ******************************************************************************/
 
-static void
-nv84_bsp_intr(struct nouveau_subdev *subdev)
-{
-}
-
 static int
 nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
              struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv84_bsp_priv *priv;
        int ret;
 
-       ret = nouveau_bsp_create(parent, engine, oclass, &priv);
+       ret = nouveau_engine_create(parent, engine, oclass, true,
+                                   "PBSP", "bsp", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
        nv_subdev(priv)->unit = 0x04008000;
-       nv_subdev(priv)->intr = nv84_bsp_intr;
        nv_engine(priv)->cclass = &nv84_bsp_cclass;
        nv_engine(priv)->sclass = nv84_bsp_sclass;
        return 0;
 }
 
-static void
-nv84_bsp_dtor(struct nouveau_object *object)
-{
-       struct nv84_bsp_priv *priv = (void *)object;
-       nouveau_bsp_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_init(struct nouveau_object *object)
-{
-       struct nv84_bsp_priv *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_bsp_init(&priv->base);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-nv84_bsp_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv84_bsp_priv *priv = (void *)object;
-       return nouveau_bsp_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nv84_bsp_oclass = {
        .handle = NV_ENGINE(BSP, 0x84),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv84_bsp_ctor,
-               .dtor = nv84_bsp_dtor,
-               .init = nv84_bsp_init,
-               .fini = nv84_bsp_fini,
+               .dtor = _nouveau_engine_dtor,
+               .init = _nouveau_engine_init,
+               .fini = _nouveau_engine_fini,
        },
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
new file mode 100644 (file)
index 0000000..0a5aa6b
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nvc0_bsp_priv {
+       struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_sclass[] = {
+       { 0x90b1, &nouveau_object_ofuncs },
+       {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_cclass = {
+       .handle = NV_ENGCTX(BSP, 0xc0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
+       },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_bsp_init(struct nouveau_object *object)
+{
+       struct nvc0_bsp_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_falcon_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x084010, 0x0000fff2);
+       nv_wr32(priv, 0x08401c, 0x0000fff2);
+       return 0;
+}
+
+static int
+nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+             struct nouveau_oclass *oclass, void *data, u32 size,
+             struct nouveau_object **pobject)
+{
+       struct nvc0_bsp_priv *priv;
+       int ret;
+
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+                                   "PBSP", "bsp", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_subdev(priv)->unit = 0x00008000;
+       nv_engine(priv)->cclass = &nvc0_bsp_cclass;
+       nv_engine(priv)->sclass = nvc0_bsp_sclass;
+       return 0;
+}
+
+struct nouveau_oclass
+nvc0_bsp_oclass = {
+       .handle = NV_ENGINE(BSP, 0xc0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nvc0_bsp_ctor,
+               .dtor = _nouveau_falcon_dtor,
+               .init = nvc0_bsp_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
new file mode 100644 (file)
index 0000000..d4f23bb
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nve0_bsp_priv {
+       struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_sclass[] = {
+       { 0x95b1, &nouveau_object_ofuncs },
+       {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_cclass = {
+       .handle = NV_ENGCTX(BSP, 0xe0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
+       },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_bsp_init(struct nouveau_object *object)
+{
+       struct nve0_bsp_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_falcon_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x084010, 0x0000fff2);
+       nv_wr32(priv, 0x08401c, 0x0000fff2);
+       return 0;
+}
+
+static int
+nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+             struct nouveau_oclass *oclass, void *data, u32 size,
+             struct nouveau_object **pobject)
+{
+       struct nve0_bsp_priv *priv;
+       int ret;
+
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+                                   "PBSP", "bsp", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_subdev(priv)->unit = 0x00008000;
+       nv_engine(priv)->cclass = &nve0_bsp_cclass;
+       nv_engine(priv)->sclass = nve0_bsp_sclass;
+       return 0;
+}
+
+struct nouveau_oclass
+nve0_bsp_oclass = {
+       .handle = NV_ENGINE(BSP, 0xe0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nve0_bsp_ctor,
+               .dtor = _nouveau_falcon_dtor,
+               .init = nve0_bsp_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
+       },
+};
index 4df6da0..283248c 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
 #include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
 
 #include <subdev/fb.h>
 #include <subdev/vm.h>
 #include "fuc/nva3.fuc.h"
 
 struct nva3_copy_priv {
-       struct nouveau_copy base;
-};
-
-struct nva3_copy_chan {
-       struct nouveau_copy_chan base;
+       struct nouveau_falcon base;
 };
 
 /*******************************************************************************
@@ -57,34 +52,16 @@ nva3_copy_sclass[] = {
  * PCOPY context
  ******************************************************************************/
 
-static int
-nva3_copy_context_ctor(struct nouveau_object *parent,
-                      struct nouveau_object *engine,
-                      struct nouveau_oclass *oclass, void *data, u32 size,
-                      struct nouveau_object **pobject)
-{
-       struct nva3_copy_chan *priv;
-       int ret;
-
-       ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
-                                         NVOBJ_FLAG_ZERO_ALLOC, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 static struct nouveau_oclass
 nva3_copy_cclass = {
        .handle = NV_ENGCTX(COPY0, 0xa3),
        .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nva3_copy_context_ctor,
-               .dtor = _nouveau_copy_context_dtor,
-               .init = _nouveau_copy_context_init,
-               .fini = _nouveau_copy_context_fini,
-               .rd32 = _nouveau_copy_context_rd32,
-               .wr32 = _nouveau_copy_context_wr32,
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
 
        },
 };
@@ -100,41 +77,40 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = {
        {}
 };
 
-static void
+void
 nva3_copy_intr(struct nouveau_subdev *subdev)
 {
        struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
        struct nouveau_engine *engine = nv_engine(subdev);
+       struct nouveau_falcon *falcon = (void *)subdev;
        struct nouveau_object *engctx;
-       struct nva3_copy_priv *priv = (void *)subdev;
-       u32 dispatch = nv_rd32(priv, 0x10401c);
-       u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
-       u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
-       u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
-       u32 addr = nv_rd32(priv, 0x104040) >> 16;
+       u32 dispatch = nv_ro32(falcon, 0x01c);
+       u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+       u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
+       u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
+       u32 addr = nv_ro32(falcon, 0x040) >> 16;
        u32 mthd = (addr & 0x07ff) << 2;
        u32 subc = (addr & 0x3800) >> 11;
-       u32 data = nv_rd32(priv, 0x104044);
+       u32 data = nv_ro32(falcon, 0x044);
        int chid;
 
        engctx = nouveau_engctx_get(engine, inst);
        chid   = pfifo->chid(pfifo, engctx);
 
        if (stat & 0x00000040) {
-               nv_error(priv, "DISPATCH_ERROR [");
+               nv_error(falcon, "DISPATCH_ERROR [");
                nouveau_enum_print(nva3_copy_isr_error_name, ssta);
                printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
                       chid, inst << 12, subc, mthd, data);
-               nv_wr32(priv, 0x104004, 0x00000040);
+               nv_wo32(falcon, 0x004, 0x00000040);
                stat &= ~0x00000040;
        }
 
        if (stat) {
-               nv_error(priv, "unhandled intr 0x%08x\n", stat);
-               nv_wr32(priv, 0x104004, stat);
+               nv_error(falcon, "unhandled intr 0x%08x\n", stat);
+               nv_wo32(falcon, 0x004, stat);
        }
 
-       nv50_fb_trap(nouveau_fb(priv), 1);
        nouveau_engctx_put(engctx);
 }
 
@@ -154,7 +130,8 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nva3_copy_priv *priv;
        int ret;
 
-       ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
+                                   "PCE0", "copy0", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
@@ -164,59 +141,22 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        nv_engine(priv)->cclass = &nva3_copy_cclass;
        nv_engine(priv)->sclass = nva3_copy_sclass;
        nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
+       nv_falcon(priv)->code.data = nva3_pcopy_code;
+       nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
+       nv_falcon(priv)->data.data = nva3_pcopy_data;
+       nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
        return 0;
 }
 
-static int
-nva3_copy_init(struct nouveau_object *object)
-{
-       struct nva3_copy_priv *priv = (void *)object;
-       int ret, i;
-
-       ret = nouveau_copy_init(&priv->base);
-       if (ret)
-               return ret;
-
-       /* disable all interrupts */
-       nv_wr32(priv, 0x104014, 0xffffffff);
-
-       /* upload ucode */
-       nv_wr32(priv, 0x1041c0, 0x01000000);
-       for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
-               nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
-
-       nv_wr32(priv, 0x104180, 0x01000000);
-       for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
-               if ((i & 0x3f) == 0)
-                       nv_wr32(priv, 0x104188, i >> 6);
-               nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
-       }
-
-       /* start it running */
-       nv_wr32(priv, 0x10410c, 0x00000000);
-       nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
-       nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
-       return 0;
-}
-
-static int
-nva3_copy_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nva3_copy_priv *priv = (void *)object;
-
-       nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
-       nv_wr32(priv, 0x104014, 0xffffffff);
-
-       return nouveau_copy_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nva3_copy_oclass = {
        .handle = NV_ENGINE(COPY0, 0xa3),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nva3_copy_ctor,
-               .dtor = _nouveau_copy_dtor,
-               .init = nva3_copy_init,
-               .fini = nva3_copy_fini,
+               .dtor = _nouveau_falcon_dtor,
+               .init = _nouveau_falcon_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
        },
 };
index 06d4a87..b3ed273 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
 #include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
 
 #include <engine/fifo.h>
 #include <engine/copy.h>
 #include "fuc/nvc0.fuc.h"
 
 struct nvc0_copy_priv {
-       struct nouveau_copy base;
-};
-
-struct nvc0_copy_chan {
-       struct nouveau_copy_chan base;
+       struct nouveau_falcon base;
 };
 
 /*******************************************************************************
@@ -60,32 +55,14 @@ nvc0_copy1_sclass[] = {
  * PCOPY context
  ******************************************************************************/
 
-static int
-nvc0_copy_context_ctor(struct nouveau_object *parent,
-                      struct nouveau_object *engine,
-                      struct nouveau_oclass *oclass, void *data, u32 size,
-                      struct nouveau_object **pobject)
-{
-       struct nvc0_copy_chan *priv;
-       int ret;
-
-       ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
-                                         256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 static struct nouveau_ofuncs
 nvc0_copy_context_ofuncs = {
-       .ctor = nvc0_copy_context_ctor,
-       .dtor = _nouveau_copy_context_dtor,
-       .init = _nouveau_copy_context_init,
-       .fini = _nouveau_copy_context_fini,
-       .rd32 = _nouveau_copy_context_rd32,
-       .wr32 = _nouveau_copy_context_wr32,
+       .ctor = _nouveau_falcon_context_ctor,
+       .dtor = _nouveau_falcon_context_dtor,
+       .init = _nouveau_falcon_context_init,
+       .fini = _nouveau_falcon_context_fini,
+       .rd32 = _nouveau_falcon_context_rd32,
+       .wr32 = _nouveau_falcon_context_wr32,
 };
 
 static struct nouveau_oclass
@@ -104,50 +81,18 @@ nvc0_copy1_cclass = {
  * PCOPY engine/subdev functions
  ******************************************************************************/
 
-static const struct nouveau_enum nvc0_copy_isr_error_name[] = {
-       { 0x0001, "ILLEGAL_MTHD" },
-       { 0x0002, "INVALID_ENUM" },
-       { 0x0003, "INVALID_BITFIELD" },
-       {}
-};
-
-static void
-nvc0_copy_intr(struct nouveau_subdev *subdev)
+static int
+nvc0_copy_init(struct nouveau_object *object)
 {
-       struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
-       struct nouveau_engine *engine = nv_engine(subdev);
-       struct nouveau_object *engctx;
-       int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
-       struct nvc0_copy_priv *priv = (void *)subdev;
-       u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
-       u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
-       u32 stat = intr & disp & ~(disp >> 16);
-       u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
-       u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
-       u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
-       u32 mthd = (addr & 0x07ff) << 2;
-       u32 subc = (addr & 0x3800) >> 11;
-       u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
-       int chid;
-
-       engctx = nouveau_engctx_get(engine, inst);
-       chid   = pfifo->chid(pfifo, engctx);
-
-       if (stat & 0x00000040) {
-               nv_error(priv, "DISPATCH_ERROR [");
-               nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
-               printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
-                      chid, (u64)inst << 12, subc, mthd, data);
-               nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
-               stat &= ~0x00000040;
-       }
+       struct nvc0_copy_priv *priv = (void *)object;
+       int ret;
 
-       if (stat) {
-               nv_error(priv, "unhandled intr 0x%08x\n", stat);
-               nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
-       }
+       ret = nouveau_falcon_init(&priv->base);
+       if (ret)
+               return ret;
 
-       nouveau_engctx_put(engctx);
+       nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
+       return 0;
 }
 
 static int
@@ -161,15 +106,20 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (nv_rd32(parent, 0x022500) & 0x00000100)
                return -ENODEV;
 
-       ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
+                                   "PCE0", "copy0", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
        nv_subdev(priv)->unit = 0x00000040;
-       nv_subdev(priv)->intr = nvc0_copy_intr;
+       nv_subdev(priv)->intr = nva3_copy_intr;
        nv_engine(priv)->cclass = &nvc0_copy0_cclass;
        nv_engine(priv)->sclass = nvc0_copy0_sclass;
+       nv_falcon(priv)->code.data = nvc0_pcopy_code;
+       nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+       nv_falcon(priv)->data.data = nvc0_pcopy_data;
+       nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
        return 0;
 }
 
@@ -184,72 +134,33 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (nv_rd32(parent, 0x022500) & 0x00000200)
                return -ENODEV;
 
-       ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
+                                   "PCE1", "copy1", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
        nv_subdev(priv)->unit = 0x00000080;
-       nv_subdev(priv)->intr = nvc0_copy_intr;
+       nv_subdev(priv)->intr = nva3_copy_intr;
        nv_engine(priv)->cclass = &nvc0_copy1_cclass;
        nv_engine(priv)->sclass = nvc0_copy1_sclass;
+       nv_falcon(priv)->code.data = nvc0_pcopy_code;
+       nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+       nv_falcon(priv)->data.data = nvc0_pcopy_data;
+       nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
        return 0;
 }
 
-static int
-nvc0_copy_init(struct nouveau_object *object)
-{
-       int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
-       struct nvc0_copy_priv *priv = (void *)object;
-       int ret, i;
-
-       ret = nouveau_copy_init(&priv->base);
-       if (ret)
-               return ret;
-
-       /* disable all interrupts */
-       nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
-       /* upload ucode */
-       nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
-       for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
-               nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
-
-       nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
-       for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
-               if ((i & 0x3f) == 0)
-                       nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
-               nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
-       }
-
-       /* start it running */
-       nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
-       nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
-       nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
-       nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
-       return 0;
-}
-
-static int
-nvc0_copy_fini(struct nouveau_object *object, bool suspend)
-{
-       int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
-       struct nvc0_copy_priv *priv = (void *)object;
-
-       nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
-       nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
-       return nouveau_copy_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nvc0_copy0_oclass = {
        .handle = NV_ENGINE(COPY0, 0xc0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvc0_copy0_ctor,
-               .dtor = _nouveau_copy_dtor,
+               .dtor = _nouveau_falcon_dtor,
                .init = nvc0_copy_init,
-               .fini = nvc0_copy_fini,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
        },
 };
 
@@ -258,8 +169,10 @@ nvc0_copy1_oclass = {
        .handle = NV_ENGINE(COPY1, 0xc0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvc0_copy1_ctor,
-               .dtor = _nouveau_copy_dtor,
+               .dtor = _nouveau_falcon_dtor,
                .init = nvc0_copy_init,
-               .fini = nvc0_copy_fini,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
        },
 };
index 2017c15..dbbe9e8 100644 (file)
 #include <engine/copy.h>
 
 struct nve0_copy_priv {
-       struct nouveau_copy base;
-};
-
-struct nve0_copy_chan {
-       struct nouveau_copy_chan base;
+       struct nouveau_engine base;
 };
 
 /*******************************************************************************
@@ -51,32 +47,14 @@ nve0_copy_sclass[] = {
  * PCOPY context
  ******************************************************************************/
 
-static int
-nve0_copy_context_ctor(struct nouveau_object *parent,
-                      struct nouveau_object *engine,
-                      struct nouveau_oclass *oclass, void *data, u32 size,
-                      struct nouveau_object **pobject)
-{
-       struct nve0_copy_chan *priv;
-       int ret;
-
-       ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
-                                         256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 static struct nouveau_ofuncs
 nve0_copy_context_ofuncs = {
-       .ctor = nve0_copy_context_ctor,
-       .dtor = _nouveau_copy_context_dtor,
-       .init = _nouveau_copy_context_init,
-       .fini = _nouveau_copy_context_fini,
-       .rd32 = _nouveau_copy_context_rd32,
-       .wr32 = _nouveau_copy_context_wr32,
+       .ctor = _nouveau_engctx_ctor,
+       .dtor = _nouveau_engctx_dtor,
+       .init = _nouveau_engctx_init,
+       .fini = _nouveau_engctx_fini,
+       .rd32 = _nouveau_engctx_rd32,
+       .wr32 = _nouveau_engctx_wr32,
 };
 
 static struct nouveau_oclass
@@ -100,7 +78,8 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (nv_rd32(parent, 0x022500) & 0x00000100)
                return -ENODEV;
 
-       ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+       ret = nouveau_engine_create(parent, engine, oclass, true,
+                                   "PCE0", "copy0", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
@@ -122,7 +101,8 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (nv_rd32(parent, 0x022500) & 0x00000200)
                return -ENODEV;
 
-       ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+       ret = nouveau_engine_create(parent, engine, oclass, true,
+                                   "PCE1", "copy1", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
@@ -138,9 +118,9 @@ nve0_copy0_oclass = {
        .handle = NV_ENGINE(COPY0, 0xe0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nve0_copy0_ctor,
-               .dtor = _nouveau_copy_dtor,
-               .init = _nouveau_copy_init,
-               .fini = _nouveau_copy_fini,
+               .dtor = _nouveau_engine_dtor,
+               .init = _nouveau_engine_init,
+               .fini = _nouveau_engine_fini,
        },
 };
 
@@ -149,8 +129,8 @@ nve0_copy1_oclass = {
        .handle = NV_ENGINE(COPY1, 0xe0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nve0_copy1_ctor,
-               .dtor = _nouveau_copy_dtor,
-               .init = _nouveau_copy_init,
-               .fini = _nouveau_copy_fini,
+               .dtor = _nouveau_engine_dtor,
+               .init = _nouveau_engine_init,
+               .fini = _nouveau_engine_fini,
        },
 };
index 1d85e5b..b974905 100644 (file)
 #include <engine/crypt.h>
 
 struct nv84_crypt_priv {
-       struct nouveau_crypt base;
-};
-
-struct nv84_crypt_chan {
-       struct nouveau_crypt_chan base;
+       struct nouveau_engine base;
 };
 
 /*******************************************************************************
@@ -87,34 +83,16 @@ nv84_crypt_sclass[] = {
  * PCRYPT context
  ******************************************************************************/
 
-static int
-nv84_crypt_context_ctor(struct nouveau_object *parent,
-                       struct nouveau_object *engine,
-                       struct nouveau_oclass *oclass, void *data, u32 size,
-                       struct nouveau_object **pobject)
-{
-       struct nv84_crypt_chan *priv;
-       int ret;
-
-       ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
-                                          0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 static struct nouveau_oclass
 nv84_crypt_cclass = {
        .handle = NV_ENGCTX(CRYPT, 0x84),
        .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv84_crypt_context_ctor,
-               .dtor = _nouveau_crypt_context_dtor,
-               .init = _nouveau_crypt_context_init,
-               .fini = _nouveau_crypt_context_fini,
-               .rd32 = _nouveau_crypt_context_rd32,
-               .wr32 = _nouveau_crypt_context_wr32,
+               .ctor = _nouveau_engctx_ctor,
+               .dtor = _nouveau_engctx_dtor,
+               .init = _nouveau_engctx_init,
+               .fini = _nouveau_engctx_fini,
+               .rd32 = _nouveau_engctx_rd32,
+               .wr32 = _nouveau_engctx_wr32,
        },
 };
 
@@ -157,7 +135,6 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
        nv_wr32(priv, 0x102130, stat);
        nv_wr32(priv, 0x10200c, 0x10);
 
-       nv50_fb_trap(nouveau_fb(priv), 1);
        nouveau_engctx_put(engctx);
 }
 
@@ -176,7 +153,8 @@ nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv84_crypt_priv *priv;
        int ret;
 
-       ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+       ret = nouveau_engine_create(parent, engine, oclass, true,
+                                   "PCRYPT", "crypt", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
@@ -195,7 +173,7 @@ nv84_crypt_init(struct nouveau_object *object)
        struct nv84_crypt_priv *priv = (void *)object;
        int ret;
 
-       ret = nouveau_crypt_init(&priv->base);
+       ret = nouveau_engine_init(&priv->base);
        if (ret)
                return ret;
 
@@ -210,8 +188,8 @@ nv84_crypt_oclass = {
        .handle = NV_ENGINE(CRYPT, 0x84),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv84_crypt_ctor,
-               .dtor = _nouveau_crypt_dtor,
+               .dtor = _nouveau_engine_dtor,
                .init = nv84_crypt_init,
-               .fini = _nouveau_crypt_fini,
+               .fini = _nouveau_engine_fini,
        },
 };
index 9e3876c..21986f3 100644 (file)
@@ -26,6 +26,7 @@
 #include <core/enum.h>
 #include <core/class.h>
 #include <core/engctx.h>
+#include <core/falcon.h>
 
 #include <subdev/timer.h>
 #include <subdev/fb.h>
 #include "fuc/nv98.fuc.h"
 
 struct nv98_crypt_priv {
-       struct nouveau_crypt base;
-};
-
-struct nv98_crypt_chan {
-       struct nouveau_crypt_chan base;
+       struct nouveau_falcon base;
 };
 
 /*******************************************************************************
@@ -57,34 +54,16 @@ nv98_crypt_sclass[] = {
  * PCRYPT context
  ******************************************************************************/
 
-static int
-nv98_crypt_context_ctor(struct nouveau_object *parent,
-                       struct nouveau_object *engine,
-                       struct nouveau_oclass *oclass, void *data, u32 size,
-                       struct nouveau_object **pobject)
-{
-       struct nv98_crypt_chan *priv;
-       int ret;
-
-       ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
-                                          256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 static struct nouveau_oclass
 nv98_crypt_cclass = {
        .handle = NV_ENGCTX(CRYPT, 0x98),
        .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv98_crypt_context_ctor,
-               .dtor = _nouveau_crypt_context_dtor,
-               .init = _nouveau_crypt_context_init,
-               .fini = _nouveau_crypt_context_fini,
-               .rd32 = _nouveau_crypt_context_rd32,
-               .wr32 = _nouveau_crypt_context_wr32,
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
        },
 };
 
@@ -134,7 +113,6 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
                nv_wr32(priv, 0x087004, stat);
        }
 
-       nv50_fb_trap(nouveau_fb(priv), 1);
        nouveau_engctx_put(engctx);
 }
 
@@ -153,7 +131,8 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv98_crypt_priv *priv;
        int ret;
 
-       ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
+                                   "PCRYPT", "crypt", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
@@ -163,36 +142,10 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        nv_engine(priv)->cclass = &nv98_crypt_cclass;
        nv_engine(priv)->sclass = nv98_crypt_sclass;
        nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
-       return 0;
-}
-
-static int
-nv98_crypt_init(struct nouveau_object *object)
-{
-       struct nv98_crypt_priv *priv = (void *)object;
-       int ret, i;
-
-       ret = nouveau_crypt_init(&priv->base);
-       if (ret)
-               return ret;
-
-       /* wait for exit interrupt to signal */
-       nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
-       nv_wr32(priv, 0x087004, 0x00000010);
-
-       /* upload microcode code and data segments */
-       nv_wr32(priv, 0x087ff8, 0x00100000);
-       for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
-               nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
-
-       nv_wr32(priv, 0x087ff8, 0x00000000);
-       for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
-               nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
-
-       /* start it running */
-       nv_wr32(priv, 0x08710c, 0x00000000);
-       nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
-       nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
+       nv_falcon(priv)->code.data = nv98_pcrypt_code;
+       nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
+       nv_falcon(priv)->data.data = nv98_pcrypt_data;
+       nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
        return 0;
 }
 
@@ -201,8 +154,10 @@ nv98_crypt_oclass = {
        .handle = NV_ENGINE(CRYPT, 0x98),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv98_crypt_ctor,
-               .dtor = _nouveau_crypt_dtor,
-               .init = nv98_crypt_init,
-               .fini = _nouveau_crypt_fini,
+               .dtor = _nouveau_falcon_dtor,
+               .init = _nouveau_falcon_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
        },
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
new file mode 100644 (file)
index 0000000..d0817d9
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+       const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
+                        (data & NV50_DISP_DAC_PWR_VSYNC) |
+                        (data & NV50_DISP_DAC_PWR_DATA) |
+                        (data & NV50_DISP_DAC_PWR_STATE);
+       const u32 doff = (or * 0x800);
+       nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+       nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+       nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+       return 0;
+}
+
+int
+nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
+{
+       const u32 doff = (or * 0x800);
+       int load = -EINVAL;
+       nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+       udelay(9500);
+       nv_wr32(priv, 0x61a00c + doff, 0x80000000);
+       load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
+       nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+       return load;
+}
+
+int
+nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
+       u32 *data = args;
+       int ret;
+
+       if (size < sizeof(u32))
+               return -EINVAL;
+
+       switch (mthd & ~0x3f) {
+       case NV50_DISP_DAC_PWR:
+               ret = priv->dac.power(priv, or, data[0]);
+               break;
+       case NV50_DISP_DAC_LOAD:
+               ret = priv->dac.sense(priv, or, data[0]);
+               if (ret >= 0) {
+                       data[0] = ret;
+                       ret = 0;
+               }
+               break;
+       default:
+               BUG_ON(1);
+       }
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
new file mode 100644 (file)
index 0000000..373dbcc
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+       const u32 soff = (or * 0x800);
+       int i;
+
+       if (data && data[0]) {
+               for (i = 0; i < size; i++)
+                       nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+               nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+       } else
+       if (data) {
+               nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
+       } else {
+               nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
+       }
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
new file mode 100644 (file)
index 0000000..dc57e24
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+int
+nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+       const u32 soff = (or * 0x030);
+       int i;
+
+       if (data && data[0]) {
+               for (i = 0; i < size; i++)
+                       nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+               nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
+       } else
+       if (data) {
+               nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
+       } else {
+               nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
+       }
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
new file mode 100644 (file)
index 0000000..0d36bdc
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+       const u32 hoff = (head * 0x800);
+
+       if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+               nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+               nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+               nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+               return 0;
+       }
+
+       /* AVI InfoFrame */
+       nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+       nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
+       nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
+       nv_wr32(priv, 0x616530 + hoff, 0x00000000);
+       nv_wr32(priv, 0x616534 + hoff, 0x00000000);
+       nv_wr32(priv, 0x616538 + hoff, 0x00000000);
+       nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+
+       /* Audio InfoFrame */
+       nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+       nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
+       nv_wr32(priv, 0x61650c + hoff, 0x00000071);
+       nv_wr32(priv, 0x616510 + hoff, 0x00000000);
+       nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+
+       /* ??? */
+       nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+       nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+       nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+       /* HDMI_CTRL */
+       nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
new file mode 100644 (file)
index 0000000..f065fc2
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+       const u32 soff = (or * 0x800);
+
+       if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+               nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+               nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+               nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+               return 0;
+       }
+
+       /* AVI InfoFrame */
+       nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+       nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
+       nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
+       nv_wr32(priv, 0x61c530 + soff, 0x00000000);
+       nv_wr32(priv, 0x61c534 + soff, 0x00000000);
+       nv_wr32(priv, 0x61c538 + soff, 0x00000000);
+       nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+
+       /* Audio InfoFrame */
+       nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+       nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
+       nv_wr32(priv, 0x61c50c + soff, 0x00000071);
+       nv_wr32(priv, 0x61c510 + soff, 0x00000000);
+       nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+
+       /* ??? */
+       nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+       nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+       nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+       /* HDMI_CTRL */
+       nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
new file mode 100644 (file)
index 0000000..5151bb2
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+       const u32 hoff = (head * 0x800);
+
+       if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+               nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
+               nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+               nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+               return 0;
+       }
+
+       /* AVI InfoFrame */
+       nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+       nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
+       nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
+       nv_wr32(priv, 0x616724 + hoff, 0x00000000);
+       nv_wr32(priv, 0x616728 + hoff, 0x00000000);
+       nv_wr32(priv, 0x61672c + hoff, 0x00000000);
+       nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
+
+       /* ??? InfoFrame? */
+       nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+       nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
+       nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+
+       /* HDMI_CTRL */
+       nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
+
+       /* NFI, audio doesn't work without it though.. */
+       nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
+       return 0;
+}
index 16a9afb..0f09af1 100644 (file)
  * Authors: Ben Skeggs
  */
 
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
+
 #include <engine/software.h>
 #include <engine/disp.h>
 
-struct nv50_disp_priv {
-       struct nouveau_disp base;
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO channel base class
+ ******************************************************************************/
+
+int
+nv50_disp_chan_create_(struct nouveau_object *parent,
+                      struct nouveau_object *engine,
+                      struct nouveau_oclass *oclass, int chid,
+                      int length, void **pobject)
+{
+       struct nv50_disp_base *base = (void *)parent;
+       struct nv50_disp_chan *chan;
+       int ret;
+
+       if (base->chan & (1 << chid))
+               return -EBUSY;
+       base->chan |= (1 << chid);
+
+       ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+                                    (1ULL << NVDEV_ENGINE_DMAOBJ),
+                                    length, pobject);
+       chan = *pobject;
+       if (ret)
+               return ret;
+
+       chan->chid = chid;
+       return 0;
+}
+
+void
+nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
+{
+       struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
+       base->chan &= ~(1 << chan->chid);
+       nouveau_namedb_destroy(&chan->base);
+}
+
+u32
+nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_chan *chan = (void *)object;
+       return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
+}
+
+void
+nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_chan *chan = (void *)object;
+       nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
+}
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_dmac_object_attach(struct nouveau_object *parent,
+                            struct nouveau_object *object, u32 name)
+{
+       struct nv50_disp_base *base = (void *)parent->parent;
+       struct nv50_disp_chan *chan = (void *)parent;
+       u32 addr = nv_gpuobj(object)->node->offset;
+       u32 chid = chan->chid;
+       u32 data = (chid << 28) | (addr << 10) | chid;
+       return nouveau_ramht_insert(base->ramht, chid, name, data);
+}
+
+static void
+nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+       struct nv50_disp_base *base = (void *)parent->parent;
+       nouveau_ramht_remove(base->ramht, cookie);
+}
+
+int
+nv50_disp_dmac_create_(struct nouveau_object *parent,
+                      struct nouveau_object *engine,
+                      struct nouveau_oclass *oclass, u32 pushbuf, int chid,
+                      int length, void **pobject)
+{
+       struct nv50_disp_dmac *dmac;
+       int ret;
+
+       ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
+                                    length, pobject);
+       dmac = *pobject;
+       if (ret)
+               return ret;
+
+       dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+       if (!dmac->pushdma)
+               return -ENOENT;
+
+       switch (nv_mclass(dmac->pushdma)) {
+       case 0x0002:
+       case 0x003d:
+               if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
+                       return -EINVAL;
+
+               switch (dmac->pushdma->target) {
+               case NV_MEM_TARGET_VRAM:
+                       dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
+                       break;
+               case NV_MEM_TARGET_PCI_NOSNOOP:
+                       dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+void
+nv50_disp_dmac_dtor(struct nouveau_object *object)
+{
+       struct nv50_disp_dmac *dmac = (void *)object;
+       nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma);
+       nv50_disp_chan_destroy(&dmac->base);
+}
+
+static int
+nv50_disp_dmac_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *dmac = (void *)object;
+       int chid = dmac->base.chid;
+       int ret;
+
+       ret = nv50_disp_chan_init(&dmac->base);
+       if (ret)
+               return ret;
+
+       /* enable error reporting */
+       nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid);
+
+       /* initialise channel for dma command submission */
+       nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
+       nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
+       nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
+       nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+       nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+       nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
+
+       /* wait for it to go inactive */
+       if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
+               nv_error(dmac, "init timeout, 0x%08x\n",
+                        nv_rd32(priv, 0x610200 + (chid * 0x10)));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int
+nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *dmac = (void *)object;
+       int chid = dmac->base.chid;
+
+       /* deactivate channel */
+       nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+       nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+       if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+               nv_error(dmac, "fini timeout, 0x%08x\n",
+                        nv_rd32(priv, 0x610200 + (chid * 0x10)));
+               if (suspend)
+                       return -EBUSY;
+       }
+
+       /* disable error reporting */
+       nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+
+       return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nv50_disp_mast_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_mast_class *args = data;
+       struct nv50_disp_dmac *mast;
+       int ret;
+
+       if (size < sizeof(*args))
+               return -EINVAL;
+
+       ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+                                    0, sizeof(*mast), (void **)&mast);
+       *pobject = nv_object(mast);
+       if (ret)
+               return ret;
+
+       nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
+       nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
+       return 0;
+}
+
+static int
+nv50_disp_mast_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *mast = (void *)object;
+       int ret;
+
+       ret = nv50_disp_chan_init(&mast->base);
+       if (ret)
+               return ret;
+
+       /* enable error reporting */
+       nv_mask(priv, 0x610028, 0x00010001, 0x00010001);
+
+       /* attempt to unstick channel from some unknown state */
+       if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
+               nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
+       if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
+               nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
+
+       /* initialise channel for dma command submission */
+       nv_wr32(priv, 0x610204, mast->push);
+       nv_wr32(priv, 0x610208, 0x00010000);
+       nv_wr32(priv, 0x61020c, 0x00000000);
+       nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
+       nv_wr32(priv, 0x640000, 0x00000000);
+       nv_wr32(priv, 0x610200, 0x01000013);
+
+       /* wait for it to go inactive */
+       if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
+               nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int
+nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *mast = (void *)object;
+
+       /* deactivate channel */
+       nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
+       nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
+       if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
+               nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
+               if (suspend)
+                       return -EBUSY;
+       }
+
+       /* disable error reporting */
+       nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
+
+       return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_mast_ofuncs = {
+       .ctor = nv50_disp_mast_ctor,
+       .dtor = nv50_disp_dmac_dtor,
+       .init = nv50_disp_mast_init,
+       .fini = nv50_disp_mast_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_sync_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_sync_class *args = data;
+       struct nv50_disp_dmac *dmac;
+       int ret;
+
+       if (size < sizeof(*data) || args->head > 1)
+               return -EINVAL;
+
+       ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+                                    1 + args->head, sizeof(*dmac),
+                                    (void **)&dmac);
+       *pobject = nv_object(dmac);
+       if (ret)
+               return ret;
+
+       nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+       nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+       return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_sync_ofuncs = {
+       .ctor = nv50_disp_sync_ctor,
+       .dtor = nv50_disp_dmac_dtor,
+       .init = nv50_disp_dmac_init,
+       .fini = nv50_disp_dmac_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_ovly_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_ovly_class *args = data;
+       struct nv50_disp_dmac *dmac;
+       int ret;
+
+       if (size < sizeof(*data) || args->head > 1)
+               return -EINVAL;
+
+       ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+                                    3 + args->head, sizeof(*dmac),
+                                    (void **)&dmac);
+       *pobject = nv_object(dmac);
+       if (ret)
+               return ret;
+
+       nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+       nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+       return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_ovly_ofuncs = {
+       .ctor = nv50_disp_ovly_ctor,
+       .dtor = nv50_disp_dmac_dtor,
+       .init = nv50_disp_dmac_init,
+       .fini = nv50_disp_dmac_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_pioc_create_(struct nouveau_object *parent,
+                      struct nouveau_object *engine,
+                      struct nouveau_oclass *oclass, int chid,
+                      int length, void **pobject)
+{
+       return nv50_disp_chan_create_(parent, engine, oclass, chid,
+                                     length, pobject);
+}
+
+static void
+nv50_disp_pioc_dtor(struct nouveau_object *object)
+{
+       struct nv50_disp_pioc *pioc = (void *)object;
+       nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nv50_disp_pioc_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_pioc *pioc = (void *)object;
+       int chid = pioc->base.chid;
+       int ret;
+
+       ret = nv50_disp_chan_init(&pioc->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
+       if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
+               nv_error(pioc, "timeout0: 0x%08x\n",
+                        nv_rd32(priv, 0x610200 + (chid * 0x10)));
+               return -EBUSY;
+       }
+
+       nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
+       if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
+               nv_error(pioc, "timeout1: 0x%08x\n",
+                        nv_rd32(priv, 0x610200 + (chid * 0x10)));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int
+nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_pioc *pioc = (void *)object;
+       int chid = pioc->base.chid;
+
+       nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+       if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
+               nv_error(pioc, "timeout: 0x%08x\n",
+                        nv_rd32(priv, 0x610200 + (chid * 0x10)));
+               if (suspend)
+                       return -EBUSY;
+       }
+
+       return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_oimm_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_oimm_class *args = data;
+       struct nv50_disp_pioc *pioc;
+       int ret;
+
+       if (size < sizeof(*args) || args->head > 1)
+               return -EINVAL;
+
+       ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
+                                    sizeof(*pioc), (void **)&pioc);
+       *pobject = nv_object(pioc);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_oimm_ofuncs = {
+       .ctor = nv50_disp_oimm_ctor,
+       .dtor = nv50_disp_pioc_dtor,
+       .init = nv50_disp_pioc_init,
+       .fini = nv50_disp_pioc_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_curs_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_curs_class *args = data;
+       struct nv50_disp_pioc *pioc;
+       int ret;
+
+       if (size < sizeof(*args) || args->head > 1)
+               return -EINVAL;
+
+       ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
+                                    sizeof(*pioc), (void **)&pioc);
+       *pobject = nv_object(pioc);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_curs_ofuncs = {
+       .ctor = nv50_disp_curs_ctor,
+       .dtor = nv50_disp_pioc_dtor,
+       .init = nv50_disp_pioc_init,
+       .fini = nv50_disp_pioc_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nv50_disp_base_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nv50_disp_base *base;
+       int ret;
+
+       ret = nouveau_parent_create(parent, engine, oclass, 0,
+                                   priv->sclass, 0, &base);
+       *pobject = nv_object(base);
+       if (ret)
+               return ret;
+
+       return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nv50_disp_base_dtor(struct nouveau_object *object)
+{
+       struct nv50_disp_base *base = (void *)object;
+       nouveau_ramht_ref(NULL, &base->ramht);
+       nouveau_parent_destroy(&base->base);
+}
+
+static int
+nv50_disp_base_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_base *base = (void *)object;
+       int ret, i;
+       u32 tmp;
+
+       ret = nouveau_parent_init(&base->base);
+       if (ret)
+               return ret;
+
+       /* The below segments of code copying values from one register to
+        * another appear to inform EVO of the display capabilities or
+        * something similar.  NFI what the 0x614004 caps are for..
+        */
+       tmp = nv_rd32(priv, 0x614004);
+       nv_wr32(priv, 0x610184, tmp);
+
+       /* ... CRTC caps */
+       for (i = 0; i < priv->head.nr; i++) {
+               tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
+               nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
+               tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+               nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
+               tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+               nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
+               tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+               nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
+       }
+
+       /* ... DAC caps */
+       for (i = 0; i < priv->dac.nr; i++) {
+               tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+               nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
+       }
+
+       /* ... SOR caps */
+       for (i = 0; i < priv->sor.nr; i++) {
+               tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+               nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
+       }
+
+       /* ... EXT caps */
+       for (i = 0; i < 3; i++) {
+               tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
+               nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
+       }
+
+       /* steal display away from vbios, or something like that */
+       if (nv_rd32(priv, 0x610024) & 0x00000100) {
+               nv_wr32(priv, 0x610024, 0x00000100);
+               nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+               if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+                       nv_error(priv, "timeout acquiring display\n");
+                       return -EBUSY;
+               }
+       }
+
+       /* point at display engine memory area (hash table, objects) */
+       nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
+
+       /* enable supervisor interrupts, disable everything else */
+       nv_wr32(priv, 0x61002c, 0x00000370);
+       nv_wr32(priv, 0x610028, 0x00000000);
+       return 0;
+}
+
+static int
+nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_base *base = (void *)object;
+
+       /* disable all interrupts */
+       nv_wr32(priv, 0x610024, 0x00000000);
+       nv_wr32(priv, 0x610020, 0x00000000);
+
+       return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_base_ofuncs = {
+       .ctor = nv50_disp_base_ctor,
+       .dtor = nv50_disp_base_dtor,
+       .init = nv50_disp_base_init,
+       .fini = nv50_disp_base_fini,
+};
+
+static struct nouveau_omthds
+nv50_disp_base_omthds[] = {
+       { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+       { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+       {},
+};
+
+static struct nouveau_oclass
+nv50_disp_base_oclass[] = {
+       { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
+       {}
 };
 
 static struct nouveau_oclass
 nv50_disp_sclass[] = {
-       {},
+       { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+       { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+       { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+       { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+       { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+       {}
+};
+
+/*******************************************************************************
+ * Display context, tracks instmem allocation and prevents more than one
+ * client using the display hardware at any time.
+ ******************************************************************************/
+
+static int
+nv50_disp_data_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nouveau_engctx *ectx;
+       int ret = -EBUSY;
+
+       /* no context needed for channel objects... */
+       if (nv_mclass(parent) != NV_DEVICE_CLASS) {
+               atomic_inc(&parent->refcount);
+               *pobject = parent;
+               return 0;
+       }
+
+       /* allocate display hardware to client */
+       mutex_lock(&nv_subdev(priv)->mutex);
+       if (list_empty(&nv_engine(priv)->contexts)) {
+               ret = nouveau_engctx_create(parent, engine, oclass, NULL,
+                                           0x10000, 0x10000,
+                                           NVOBJ_FLAG_HEAP, &ectx);
+               *pobject = nv_object(ectx);
+       }
+       mutex_unlock(&nv_subdev(priv)->mutex);
+       return ret;
+}
+
+struct nouveau_oclass
+nv50_disp_cclass = {
+       .handle = NV_ENGCTX(DISP, 0x50),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv50_disp_data_ctor,
+               .dtor = _nouveau_engctx_dtor,
+               .init = _nouveau_engctx_init,
+               .fini = _nouveau_engctx_fini,
+               .rd32 = _nouveau_engctx_rd32,
+               .wr32 = _nouveau_engctx_wr32,
+       },
 };
 
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv50_disp_intr_error(struct nv50_disp_priv *priv)
+{
+       u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16;
+       u32 addr, data;
+       int chid;
+
+       for (chid = 0; chid < 5; chid++) {
+               if (!(channels & (1 << chid)))
+                       continue;
+
+               nv_wr32(priv, 0x610020, 0x00010000 << chid);
+               addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+               data = nv_rd32(priv, 0x610084 + (chid * 0x08));
+               nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+
+               nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n",
+                        chid, addr & 0xffc, data, addr);
+       }
+}
+
 static void
 nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
 {
+       struct nouveau_bar *bar = nouveau_bar(priv);
        struct nouveau_disp *disp = &priv->base;
        struct nouveau_software_chan *chan, *temp;
        unsigned long flags;
@@ -46,19 +769,25 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
                if (chan->vblank.crtc != crtc)
                        continue;
 
-               nv_wr32(priv, 0x001704, chan->vblank.channel);
-               nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
-
-               if (nv_device(priv)->chipset == 0x50) {
-                       nv_wr32(priv, 0x001570, chan->vblank.offset);
-                       nv_wr32(priv, 0x001574, chan->vblank.value);
+               if (nv_device(priv)->chipset >= 0xc0) {
+                       nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
+                       bar->flush(bar);
+                       nv_wr32(priv, 0x06000c,
+                               upper_32_bits(chan->vblank.offset));
+                       nv_wr32(priv, 0x060010,
+                               lower_32_bits(chan->vblank.offset));
+                       nv_wr32(priv, 0x060014, chan->vblank.value);
                } else {
-                       if (nv_device(priv)->chipset >= 0xc0) {
-                               nv_wr32(priv, 0x06000c,
-                                       upper_32_bits(chan->vblank.offset));
+                       nv_wr32(priv, 0x001704, chan->vblank.channel);
+                       nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+                       bar->flush(bar);
+                       if (nv_device(priv)->chipset == 0x50) {
+                               nv_wr32(priv, 0x001570, chan->vblank.offset);
+                               nv_wr32(priv, 0x001574, chan->vblank.value);
+                       } else {
+                               nv_wr32(priv, 0x060010, chan->vblank.offset);
+                               nv_wr32(priv, 0x060014, chan->vblank.value);
                        }
-                       nv_wr32(priv, 0x060010, chan->vblank.offset);
-                       nv_wr32(priv, 0x060014, chan->vblank.value);
                }
 
                list_del(&chan->vblank.head);
@@ -71,30 +800,422 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
                disp->vblank.notify(disp->vblank.data, crtc);
 }
 
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+           struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+           struct nvbios_outp *info)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       u16 mask, type, data;
+
+       if (outp < 4) {
+               type = DCB_OUTPUT_ANALOG;
+               mask = 0;
+       } else {
+               outp -= 4;
+               switch (ctrl & 0x00000f00) {
+               case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+               case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+               case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+               case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+               case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+               case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+               default:
+                       nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+                       return 0x0000;
+               }
+       }
+
+       mask  = 0x00c0 & (mask << 6);
+       mask |= 0x0001 << outp;
+       mask |= 0x0100 << head;
+
+       data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+       if (!data)
+               return 0x0000;
+
+       return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int id)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_outp info;
+       struct dcb_output dcb;
+       u8  ver, hdr, cnt, len;
+       u16 data;
+       u32 ctrl = 0x00000000;
+       int i;
+
+       for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+               ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+
+       if (nv_device(priv)->chipset  < 0x90 ||
+           nv_device(priv)->chipset == 0x92 ||
+           nv_device(priv)->chipset == 0xa0) {
+               for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+                       ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
+               i += 3;
+       } else {
+               for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+                       ctrl = nv_rd32(priv, 0x610798 + (i * 8));
+               i += 3;
+       }
+
+       if (!(ctrl & (1 << head)))
+               return false;
+
+       data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+       if (data) {
+               struct nvbios_init init = {
+                       .subdev = nv_subdev(priv),
+                       .bios = bios,
+                       .offset = info.script[id],
+                       .outp = &dcb,
+                       .crtc = head,
+                       .execute = 1,
+               };
+
+               return nvbios_exec(&init) == 0;
+       }
+
+       return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
+           struct dcb_output *outp)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_outp info1;
+       struct nvbios_ocfg info2;
+       u8  ver, hdr, cnt, len;
+       u16 data, conf;
+       u32 ctrl = 0x00000000;
+       int i;
+
+       for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+               ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+
+       if (nv_device(priv)->chipset  < 0x90 ||
+           nv_device(priv)->chipset == 0x92 ||
+           nv_device(priv)->chipset == 0xa0) {
+               for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+                       ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
+               i += 3;
+       } else {
+               for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+                       ctrl = nv_rd32(priv, 0x610794 + (i * 8));
+               i += 3;
+       }
+
+       if (!(ctrl & (1 << head)))
+               return 0x0000;
+
+       data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
+       if (!data)
+               return 0x0000;
+
+       switch (outp->type) {
+       case DCB_OUTPUT_TMDS:
+               conf = (ctrl & 0x00000f00) >> 8;
+               if (pclk >= 165000)
+                       conf |= 0x0100;
+               break;
+       case DCB_OUTPUT_LVDS:
+               conf = priv->sor.lvdsconf;
+               break;
+       case DCB_OUTPUT_DP:
+               conf = (ctrl & 0x00000f00) >> 8;
+               break;
+       case DCB_OUTPUT_ANALOG:
+       default:
+               conf = 0x00ff;
+               break;
+       }
+
+       data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+       if (data) {
+               data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+               if (data) {
+                       struct nvbios_init init = {
+                               .subdev = nv_subdev(priv),
+                               .bios = bios,
+                               .offset = data,
+                               .outp = outp,
+                               .crtc = head,
+                               .execute = 1,
+                       };
+
+                       if (nvbios_exec(&init))
+                               return 0x0000;
+                       return conf;
+               }
+       }
+
+       return 0x0000;
+}
+
+static void
+nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
+{
+       int head = ffs((super & 0x00000060) >> 5) - 1;
+       if (head >= 0) {
+               head = ffs((super & 0x00000180) >> 7) - 1;
+               if (head >= 0)
+                       exec_script(priv, head, 1);
+       }
+
+       nv_wr32(priv, 0x610024, 0x00000010);
+       nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+static void
+nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
+                       struct dcb_output *outp, u32 pclk)
+{
+       const int link = !(outp->sorconf.link & 1);
+       const int   or = ffs(outp->or) - 1;
+       const u32 soff = (  or * 0x800);
+       const u32 loff = (link * 0x080) + soff;
+       const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+       const u32 symbol = 100000;
+       u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x0000f0000;
+       u32 clksor = nv_rd32(priv, 0x614300 + soff);
+       int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
+       int TU, VTUi, VTUf, VTUa;
+       u64 link_data_rate, link_ratio, unk;
+       u32 best_diff = 64 * symbol;
+       u32 link_nr, link_bw, bits, r;
+
+       /* calculate packed data rate for each lane */
+       if      (dpctrl > 0x00030000) link_nr = 4;
+       else if (dpctrl > 0x00010000) link_nr = 2;
+       else                          link_nr = 1;
+
+       if (clksor & 0x000c0000)
+               link_bw = 270000;
+       else
+               link_bw = 162000;
+
+       if      ((ctrl & 0xf0000) == 0x60000) bits = 30;
+       else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
+       else                                  bits = 18;
+
+       link_data_rate = (pclk * bits / 8) / link_nr;
+
+       /* calculate ratio of packed data rate to link symbol rate */
+       link_ratio = link_data_rate * symbol;
+       r = do_div(link_ratio, link_bw);
+
+       for (TU = 64; TU >= 32; TU--) {
+               /* calculate average number of valid symbols in each TU */
+               u32 tu_valid = link_ratio * TU;
+               u32 calc, diff;
+
+               /* find a hw representation for the fraction.. */
+               VTUi = tu_valid / symbol;
+               calc = VTUi * symbol;
+               diff = tu_valid - calc;
+               if (diff) {
+                       if (diff >= (symbol / 2)) {
+                               VTUf = symbol / (symbol - diff);
+                               if (symbol - (VTUf * diff))
+                                       VTUf++;
+
+                               if (VTUf <= 15) {
+                                       VTUa  = 1;
+                                       calc += symbol - (symbol / VTUf);
+                               } else {
+                                       VTUa  = 0;
+                                       VTUf  = 1;
+                                       calc += symbol;
+                               }
+                       } else {
+                               VTUa  = 0;
+                               VTUf  = min((int)(symbol / diff), 15);
+                               calc += symbol / VTUf;
+                       }
+
+                       diff = calc - tu_valid;
+               } else {
+                       /* no remainder, but the hw doesn't like the fractional
+                        * part to be zero.  decrement the integer part and
+                        * have the fraction add a whole symbol back
+                        */
+                       VTUa = 0;
+                       VTUf = 1;
+                       VTUi--;
+               }
+
+               if (diff < best_diff) {
+                       best_diff = diff;
+                       bestTU = TU;
+                       bestVTUa = VTUa;
+                       bestVTUf = VTUf;
+                       bestVTUi = VTUi;
+                       if (diff == 0)
+                               break;
+               }
+       }
+
+       if (!bestTU) {
+               nv_error(priv, "unable to find suitable dp config\n");
+               return;
+       }
+
+       /* XXX close to vbios numbers, but not right */
+       unk  = (symbol - link_ratio) * bestTU;
+       unk *= link_ratio;
+       r = do_div(unk, symbol);
+       r = do_div(unk, symbol);
+       unk += 6;
+
+       nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+       nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+                                                  bestVTUf << 16 |
+                                                  bestVTUi << 8 | unk);
+}
+
+static void
+nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
+{
+       struct dcb_output outp;
+       u32 addr, mask, data;
+       int head;
+
+       /* finish detaching encoder? */
+       head = ffs((super & 0x00000180) >> 7) - 1;
+       if (head >= 0)
+               exec_script(priv, head, 2);
+
+       /* check whether a vpll change is required */
+       head = ffs((super & 0x00000600) >> 9) - 1;
+       if (head >= 0) {
+               u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+               if (pclk) {
+                       struct nouveau_clock *clk = nouveau_clock(priv);
+                       clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+               }
+
+               nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
+       }
+
+       /* (re)attach the relevant OR to the head */
+       head = ffs((super & 0x00000180) >> 7) - 1;
+       if (head >= 0) {
+               u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+               u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
+               if (conf) {
+                       if (outp.type == DCB_OUTPUT_ANALOG) {
+                               addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
+                               mask = 0xffffffff;
+                               data = 0x00000000;
+                       } else {
+                               if (outp.type == DCB_OUTPUT_DP)
+                                       nv50_disp_intr_unk20_dp(priv, &outp, pclk);
+                               addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
+                               mask = 0x00000707;
+                               data = (conf & 0x0100) ? 0x0101 : 0x0000;
+                       }
+
+                       nv_mask(priv, addr, mask, data);
+               }
+       }
+
+       nv_wr32(priv, 0x610024, 0x00000020);
+       nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+/* If programming a TMDS output on a SOR that can also be configured for
+ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
+ *
+ * It looks like the VBIOS TMDS scripts make an attempt at this, however,
+ * the VBIOS scripts on at least one board I have only switch it off on
+ * link 0, causing a blank display if the output has previously been
+ * programmed for DisplayPort.
+ */
+static void
+nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const int link = !(outp->sorconf.link & 1);
+       const int   or = ffs(outp->or) - 1;
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u16 mask = (outp->sorconf.link << 6) | outp->or;
+       u8  ver, hdr;
+
+       if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+               nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+}
+
 static void
+nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
+{
+       int head = ffs((super & 0x00000180) >> 7) - 1;
+       if (head >= 0) {
+               struct dcb_output outp;
+               u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+               if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
+                       if (outp.type == DCB_OUTPUT_TMDS)
+                               nv50_disp_intr_unk40_tmds(priv, &outp);
+               }
+       }
+
+       nv_wr32(priv, 0x610024, 0x00000040);
+       nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+static void
+nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
+{
+       u32 super = nv_rd32(priv, 0x610030);
+
+       nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
+
+       if (intr1 & 0x00000010)
+               nv50_disp_intr_unk10(priv, super);
+       if (intr1 & 0x00000020)
+               nv50_disp_intr_unk20(priv, super);
+       if (intr1 & 0x00000040)
+               nv50_disp_intr_unk40(priv, super);
+}
+
+void
 nv50_disp_intr(struct nouveau_subdev *subdev)
 {
        struct nv50_disp_priv *priv = (void *)subdev;
-       u32 stat1 = nv_rd32(priv, 0x610024);
+       u32 intr0 = nv_rd32(priv, 0x610020);
+       u32 intr1 = nv_rd32(priv, 0x610024);
 
-       if (stat1 & 0x00000004) {
+       if (intr0 & 0x001f0000) {
+               nv50_disp_intr_error(priv);
+               intr0 &= ~0x001f0000;
+       }
+
+       if (intr1 & 0x00000004) {
                nv50_disp_intr_vblank(priv, 0);
                nv_wr32(priv, 0x610024, 0x00000004);
-               stat1 &= ~0x00000004;
+               intr1 &= ~0x00000004;
        }
 
-       if (stat1 & 0x00000008) {
+       if (intr1 & 0x00000008) {
                nv50_disp_intr_vblank(priv, 1);
                nv_wr32(priv, 0x610024, 0x00000008);
-               stat1 &= ~0x00000008;
+               intr1 &= ~0x00000008;
        }
 
+       if (intr1 & 0x00000070) {
+               nv50_disp_intr_super(priv, intr1);
+               intr1 &= ~0x00000070;
+       }
 }
 
 static int
 nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                 struct nouveau_oclass *oclass, void *data, u32 size,
-                 struct nouveau_object **pobject)
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
 {
        struct nv50_disp_priv *priv;
        int ret;
@@ -105,8 +1226,16 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = nv50_disp_sclass;
+       nv_engine(priv)->sclass = nv50_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nv50_disp_intr;
+       priv->sclass = nv50_disp_sclass;
+       priv->head.nr = 2;
+       priv->dac.nr = 3;
+       priv->sor.nr = 2;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
 
        INIT_LIST_HEAD(&priv->base.vblank.list);
        spin_lock_init(&priv->base.vblank.lock);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
new file mode 100644 (file)
index 0000000..a6bb931
--- /dev/null
@@ -0,0 +1,142 @@
+#ifndef __NV50_DISP_H__
+#define __NV50_DISP_H__
+
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/ramht.h>
+
+#include <engine/dmaobj.h>
+#include <engine/disp.h>
+
+struct dcb_output;
+
+struct nv50_disp_priv {
+       struct nouveau_disp base;
+       struct nouveau_oclass *sclass;
+       struct {
+               int nr;
+       } head;
+       struct {
+               int nr;
+               int (*power)(struct nv50_disp_priv *, int dac, u32 data);
+               int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
+       } dac;
+       struct {
+               int nr;
+               int (*power)(struct nv50_disp_priv *, int sor, u32 data);
+               int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
+               int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
+               int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
+                                    int head, u16 type, u16 mask, u32 data,
+                                    struct dcb_output *);
+               int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
+                                    int head, u16 type, u16 mask, u32 data,
+                                    struct dcb_output *);
+               int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
+                               u16 type, u16 mask, u32 data,
+                               struct dcb_output *);
+               int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
+                                int head, u16 type, u16 mask, u32 data,
+                                struct dcb_output *);
+               int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
+                                int lane, u16 type, u16 mask, u32 data,
+                                struct dcb_output *);
+               u32 lvdsconf;
+       } sor;
+};
+
+#define DAC_MTHD(n) (n), (n) + 0x03
+
+int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_dac_power(struct nv50_disp_priv *, int, u32);
+int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
+
+#define SOR_MTHD(n) (n), (n) + 0x3f
+
+int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+
+int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+
+int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_sor_power(struct nv50_disp_priv *, int, u32);
+
+int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
+                          u32, struct dcb_output *);
+int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16,
+                          u32, struct dcb_output *);
+int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+                     struct dcb_output *);
+int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+                      struct dcb_output *);
+int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+                      struct dcb_output *);
+
+int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+                     struct dcb_output *);
+int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+                      struct dcb_output *);
+int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+                      struct dcb_output *);
+
+struct nv50_disp_base {
+       struct nouveau_parent base;
+       struct nouveau_ramht *ramht;
+       u32 chan;
+};
+
+struct nv50_disp_chan {
+       struct nouveau_namedb base;
+       int chid;
+};
+
+int  nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
+                           struct nouveau_oclass *, int, int, void **);
+void nv50_disp_chan_destroy(struct nv50_disp_chan *);
+u32  nv50_disp_chan_rd32(struct nouveau_object *, u64);
+void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
+
+#define nv50_disp_chan_init(a)                                                 \
+       nouveau_namedb_init(&(a)->base)
+#define nv50_disp_chan_fini(a,b)                                               \
+       nouveau_namedb_fini(&(a)->base, (b))
+
+int  nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
+                           struct nouveau_oclass *, u32, int, int, void **);
+void nv50_disp_dmac_dtor(struct nouveau_object *);
+
+struct nv50_disp_dmac {
+       struct nv50_disp_chan base;
+       struct nouveau_dmaobj *pushdma;
+       u32 push;
+};
+
+struct nv50_disp_pioc {
+       struct nv50_disp_chan base;
+};
+
+extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
+extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_intr(struct nouveau_subdev *);
+
+extern struct nouveau_omthds nv84_disp_base_omthds[];
+
+extern struct nouveau_omthds nva3_disp_base_omthds[];
+
+extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_oclass nvd0_disp_cclass;
+void nvd0_disp_intr(struct nouveau_subdev *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
new file mode 100644 (file)
index 0000000..fc84eac
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv84_disp_sclass[] = {
+       { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+       { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+       { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+       { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+       { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+       {}
+};
+
+struct nouveau_omthds
+nv84_disp_base_omthds[] = {
+       { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+       { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+       { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+       {},
+};
+
+static struct nouveau_oclass
+nv84_disp_base_oclass[] = {
+       { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+       {}
+};
+
+static int
+nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv;
+       int ret;
+
+       ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+                                 "display", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nv84_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
+       nv_subdev(priv)->intr = nv50_disp_intr;
+       priv->sclass = nv84_disp_sclass;
+       priv->head.nr = 2;
+       priv->dac.nr = 3;
+       priv->sor.nr = 2;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hdmi = nv84_hdmi_ctrl;
+
+       INIT_LIST_HEAD(&priv->base.vblank.list);
+       spin_lock_init(&priv->base.vblank.lock);
+       return 0;
+}
+
+struct nouveau_oclass
+nv84_disp_oclass = {
+       .handle = NV_ENGINE(DISP, 0x82),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv84_disp_ctor,
+               .dtor = _nouveau_disp_dtor,
+               .init = _nouveau_disp_init,
+               .fini = _nouveau_disp_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
new file mode 100644 (file)
index 0000000..ba9dfd4
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv94_disp_sclass[] = {
+       { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+       { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+       { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+       { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+       { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+       {}
+};
+
+static struct nouveau_omthds
+nv94_disp_base_omthds[] = {
+       { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+       { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+       { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN)    , nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL)   , nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+       {},
+};
+
+static struct nouveau_oclass
+nv94_disp_base_oclass[] = {
+       { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
+       {}
+};
+
+static int
+nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv;
+       int ret;
+
+       ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+                                 "display", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nv94_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
+       nv_subdev(priv)->intr = nv50_disp_intr;
+       priv->sclass = nv94_disp_sclass;
+       priv->head.nr = 2;
+       priv->dac.nr = 3;
+       priv->sor.nr = 4;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hdmi = nv84_hdmi_ctrl;
+       priv->sor.dp_train = nv94_sor_dp_train;
+       priv->sor.dp_train_init = nv94_sor_dp_train_init;
+       priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+       priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+       priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+       INIT_LIST_HEAD(&priv->base.vblank.list);
+       spin_lock_init(&priv->base.vblank.lock);
+       return 0;
+}
+
+struct nouveau_oclass
+nv94_disp_oclass = {
+       .handle = NV_ENGINE(DISP, 0x88),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv94_disp_ctor,
+               .dtor = _nouveau_disp_dtor,
+               .init = _nouveau_disp_init,
+               .fini = _nouveau_disp_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
new file mode 100644 (file)
index 0000000..5d63902
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+/* EVO channel classes exposed by the NVA0 display; all channel
+ * implementations are reused from the nv50 code. */
+static struct nouveau_oclass
+nva0_disp_sclass[] = {
+       { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+       { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+       { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+       { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+       { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+       {}
+};
+
+/* Client-visible class list; the method table is shared with NV84. */
+static struct nouveau_oclass
+nva0_disp_base_oclass[] = {
+       { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+       {}
+};
+
+/* Construct the NVA0 PDISP engine.  Differs from NV94 mainly in having
+ * only two SORs and no DisplayPort hooks. */
+static int
+nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv;
+       int ret;
+
+       ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+                                 "display", &priv);
+       /* *pobject set before the error check, as in the sibling ctors */
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nva0_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
+       nv_subdev(priv)->intr = nv50_disp_intr;
+       priv->sclass = nva0_disp_sclass;
+       priv->head.nr = 2;
+       priv->dac.nr = 3;
+       priv->sor.nr = 2;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hdmi = nv84_hdmi_ctrl;
+
+       INIT_LIST_HEAD(&priv->base.vblank.list);
+       spin_lock_init(&priv->base.vblank.lock);
+       return 0;
+}
+
+/* Engine descriptor registered for chipsets using the NVA0 display. */
+struct nouveau_oclass
+nva0_disp_oclass = {
+       .handle = NV_ENGINE(DISP, 0x83),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nva0_disp_ctor,
+               .dtor = _nouveau_disp_dtor,
+               .init = _nouveau_disp_init,
+               .fini = _nouveau_disp_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
new file mode 100644 (file)
index 0000000..e9192ca
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+/* EVO channel classes exposed by the NVA3 display; channel code reused
+ * from nv50. */
+static struct nouveau_oclass
+nva3_disp_sclass[] = {
+       { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+       { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+       { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+       { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+       { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+       {}
+};
+
+/* Per-output method table for the NVA3 base class.  Adds HDA ELD (HDMI
+ * audio) on top of the NV94 set.  Non-static: also referenced by the
+ * NVD0 base class. */
+struct nouveau_omthds
+nva3_disp_base_omthds[] = {
+       { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+       { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD)     , nv50_sor_mthd },
+       { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+       { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN)    , nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL)   , nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+       {},
+};
+
+/* Client-visible class list for the NVA3 display object. */
+static struct nouveau_oclass
+nva3_disp_base_oclass[] = {
+       { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
+       {}
+};
+
+/* Construct the NVA3 PDISP engine: NV94-style DP support plus HDA ELD
+ * and the NVA3 HDMI control helper. */
+static int
+nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv;
+       int ret;
+
+       ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+                                 "display", &priv);
+       /* *pobject set before the error check, as in the sibling ctors */
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nva3_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
+       nv_subdev(priv)->intr = nv50_disp_intr;
+       priv->sclass = nva3_disp_sclass;
+       priv->head.nr = 2;
+       priv->dac.nr = 3;
+       priv->sor.nr = 4;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hda_eld = nva3_hda_eld;
+       priv->sor.hdmi = nva3_hdmi_ctrl;
+       priv->sor.dp_train = nv94_sor_dp_train;
+       priv->sor.dp_train_init = nv94_sor_dp_train_init;
+       priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+       priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+       priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+       INIT_LIST_HEAD(&priv->base.vblank.list);
+       spin_lock_init(&priv->base.vblank.lock);
+       return 0;
+}
+
+/* Engine descriptor registered for chipsets using the NVA3 display. */
+struct nouveau_oclass
+nva3_disp_oclass = {
+       .handle = NV_ENGINE(DISP, 0x85),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nva3_disp_ctor,
+               .dtor = _nouveau_disp_dtor,
+               .init = _nouveau_disp_init,
+               .fini = _nouveau_disp_fini,
+       },
+};
index d93efbc..9e38ebf 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <subdev/bar.h>
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
 
 #include <engine/software.h>
 #include <engine/disp.h>
 
-struct nvd0_disp_priv {
-       struct nouveau_disp base;
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+/* Bind a client object into the display's hash table so EVO methods can
+ * reference it by handle.  The RAMHT entry packs the channel id, the
+ * object's instance-memory offset and a valid bit. */
+static int
+nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
+                            struct nouveau_object *object, u32 name)
+{
+       struct nv50_disp_base *base = (void *)parent->parent;
+       struct nv50_disp_chan *chan = (void *)parent;
+       u32 addr = nv_gpuobj(object)->node->offset;
+       u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
+       return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
+}
+
+/* Remove a previously attached object; cookie is the RAMHT slot returned
+ * by the attach above. */
+static void
+nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+       struct nv50_disp_base *base = (void *)parent->parent;
+       nouveau_ramht_remove(base->ramht, cookie);
+}
+
+/* Bring up a push-buffer (DMA) EVO channel: unmask its error interrupts,
+ * program the push buffer and kick the channel, then wait for the channel
+ * state machine to settle.  Returns -EBUSY on timeout. */
+static int
+nvd0_disp_dmac_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *dmac = (void *)object;
+       int chid = dmac->base.chid;
+       int ret;
+
+       ret = nv50_disp_chan_init(&dmac->base);
+       if (ret)
+               return ret;
+
+       /* enable error reporting */
+       nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+       nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+       /* initialise channel for dma command submission */
+       nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
+       nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
+       nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
+       nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+       nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+       nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
+
+       /* wait for it to go inactive */
+       if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
+               nv_error(dmac, "init: 0x%08x\n",
+                        nv_rd32(priv, 0x610490 + (chid * 0x10)));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+/* Shut down a DMA EVO channel.  A timeout only fails the call when
+ * suspending, so a wedged channel cannot block a normal teardown. */
+static int
+nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *dmac = (void *)object;
+       int chid = dmac->base.chid;
+
+       /* deactivate channel */
+       nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+       nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+       if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+               nv_error(dmac, "fini: 0x%08x\n",
+                        nv_rd32(priv, 0x610490 + (chid * 0x10)));
+               if (suspend)
+                       return -EBUSY;
+       }
+
+       /* disable error reporting */
+       nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+       nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+       return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+/* Create the EVO master (core) channel, which is fixed at channel id 0,
+ * and hook up object attach/detach so clients can bind DMA objects. */
+static int
+nvd0_disp_mast_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_mast_class *args = data;
+       struct nv50_disp_dmac *mast;
+       int ret;
+
+       if (size < sizeof(*args))
+               return -EINVAL;
+
+       /* chid 0 is reserved for the master channel */
+       ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+                                    0, sizeof(*mast), (void **)&mast);
+       *pobject = nv_object(mast);
+       if (ret)
+               return ret;
+
+       nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
+       nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
+       return 0;
+}
+
+/* Master-channel variant of dmac_init: same sequence against the fixed
+ * channel-0 registers (note the different kick value, 0x01000013). */
+static int
+nvd0_disp_mast_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *mast = (void *)object;
+       int ret;
+
+       ret = nv50_disp_chan_init(&mast->base);
+       if (ret)
+               return ret;
+
+       /* enable error reporting */
+       nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
+       nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
+
+       /* initialise channel for dma command submission */
+       nv_wr32(priv, 0x610494, mast->push);
+       nv_wr32(priv, 0x610498, 0x00010000);
+       nv_wr32(priv, 0x61049c, 0x00000001);
+       nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
+       nv_wr32(priv, 0x640000, 0x00000000);
+       nv_wr32(priv, 0x610490, 0x01000013);
+
+       /* wait for it to go inactive */
+       if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
+               nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+/* Master-channel variant of dmac_fini; only fails on timeout when
+ * suspending. */
+static int
+nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *mast = (void *)object;
+
+       /* deactivate channel */
+       nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
+       nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
+       if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
+               nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
+               if (suspend)
+                       return -EBUSY;
+       }
+
+       /* disable error reporting */
+       nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
+       nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
+
+       return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+/* Object functions for the master channel. */
+struct nouveau_ofuncs
+nvd0_disp_mast_ofuncs = {
+       .ctor = nvd0_disp_mast_ctor,
+       .dtor = nv50_disp_dmac_dtor,
+       .init = nvd0_disp_mast_init,
+       .fini = nvd0_disp_mast_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+/* Create a per-head sync (base) channel; head N maps onto EVO channel
+ * id 1 + N. */
+static int
+nvd0_disp_sync_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_sync_class *args = data;
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nv50_disp_dmac *dmac;
+       int ret;
+
+       /* validate the full argument struct; sizeof(*args), not
+        * sizeof(*data) -- data is void *, so sizeof(*data) is 1 (GNU
+        * extension) and would let truncated arguments through */
+       if (size < sizeof(*args) || args->head >= priv->head.nr)
+               return -EINVAL;
+
+       ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+                                    1 + args->head, sizeof(*dmac),
+                                    (void **)&dmac);
+       *pobject = nv_object(dmac);
+       if (ret)
+               return ret;
+
+       nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+       nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+       return 0;
+}
+
+/* Object functions for the sync channel; init/fini come from the generic
+ * DMA channel implementation above. */
+struct nouveau_ofuncs
+nvd0_disp_sync_ofuncs = {
+       .ctor = nvd0_disp_sync_ctor,
+       .dtor = nv50_disp_dmac_dtor,
+       .init = nvd0_disp_dmac_init,
+       .fini = nvd0_disp_dmac_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+/* Create a per-head overlay channel; head N maps onto EVO channel
+ * id 5 + N. */
+static int
+nvd0_disp_ovly_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_ovly_class *args = data;
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nv50_disp_dmac *dmac;
+       int ret;
+
+       /* validate the full argument struct; sizeof(*args), not
+        * sizeof(*data) -- data is void *, so sizeof(*data) is 1 (GNU
+        * extension) and would let truncated arguments through */
+       if (size < sizeof(*args) || args->head >= priv->head.nr)
+               return -EINVAL;
+
+       ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+                                    5 + args->head, sizeof(*dmac),
+                                    (void **)&dmac);
+       *pobject = nv_object(dmac);
+       if (ret)
+               return ret;
+
+       nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+       nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+       return 0;
+}
+
+/* Object functions for the overlay channel; init/fini come from the
+ * generic DMA channel implementation above. */
+struct nouveau_ofuncs
+nvd0_disp_ovly_ofuncs = {
+       .ctor = nvd0_disp_ovly_ctor,
+       .dtor = nv50_disp_dmac_dtor,
+       .init = nvd0_disp_dmac_init,
+       .fini = nvd0_disp_dmac_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+/* Thin wrapper around the generic channel constructor; PIO channels have
+ * no push buffer to program. */
+static int
+nvd0_disp_pioc_create_(struct nouveau_object *parent,
+                      struct nouveau_object *engine,
+                      struct nouveau_oclass *oclass, int chid,
+                      int length, void **pobject)
+{
+       return nv50_disp_chan_create_(parent, engine, oclass, chid,
+                                     length, pobject);
+}
+
+/* Destroy a PIO channel (no extra state beyond the base channel). */
+static void
+nvd0_disp_pioc_dtor(struct nouveau_object *object)
+{
+       struct nv50_disp_pioc *pioc = (void *)object;
+       nv50_disp_chan_destroy(&pioc->base);
+}
+
+/* Bring up a PIO EVO channel: unmask its error interrupts, activate the
+ * channel and wait for it to report ready.  Returns -EBUSY on timeout. */
+static int
+nvd0_disp_pioc_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_pioc *pioc = (void *)object;
+       int chid = pioc->base.chid;
+       int ret;
+
+       ret = nv50_disp_chan_init(&pioc->base);
+       if (ret)
+               return ret;
+
+       /* enable error reporting */
+       nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+       nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+       /* activate channel */
+       nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
+       if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
+               nv_error(pioc, "init: 0x%08x\n",
+                        nv_rd32(priv, 0x610490 + (chid * 0x10)));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+/* Shut down a PIO EVO channel; a timeout only fails the call when
+ * suspending. */
+static int
+nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_pioc *pioc = (void *)object;
+       int chid = pioc->base.chid;
+
+       /* deactivate channel */
+       nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+       if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
+               nv_error(pioc, "timeout: 0x%08x\n",
+                        nv_rd32(priv, 0x610490 + (chid * 0x10)));
+               if (suspend)
+                       return -EBUSY;
+       }
+
+       /* disable error reporting */
+       nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+       nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+       return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+/* Create a per-head immediate-overlay PIO channel; head N maps onto EVO
+ * channel id 9 + N. */
+static int
+nvd0_disp_oimm_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_oimm_class *args = data;
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nv50_disp_pioc *pioc;
+       int ret;
+
+       if (size < sizeof(*args) || args->head >= priv->head.nr)
+               return -EINVAL;
+
+       ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
+                                    sizeof(*pioc), (void **)&pioc);
+       *pobject = nv_object(pioc);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Object functions for the immediate-overlay channel. */
+struct nouveau_ofuncs
+nvd0_disp_oimm_ofuncs = {
+       .ctor = nvd0_disp_oimm_ctor,
+       .dtor = nvd0_disp_pioc_dtor,
+       .init = nvd0_disp_pioc_init,
+       .fini = nvd0_disp_pioc_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+/* Create a per-head cursor PIO channel; head N maps onto EVO channel
+ * id 13 + N. */
+static int
+nvd0_disp_curs_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_curs_class *args = data;
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nv50_disp_pioc *pioc;
+       int ret;
+
+       if (size < sizeof(*args) || args->head >= priv->head.nr)
+               return -EINVAL;
+
+       ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
+                                    sizeof(*pioc), (void **)&pioc);
+       *pobject = nv_object(pioc);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Object functions for the cursor channel. */
+struct nouveau_ofuncs
+nvd0_disp_curs_ofuncs = {
+       .ctor = nvd0_disp_curs_ctor,
+       .dtor = nvd0_disp_pioc_dtor,
+       .init = nvd0_disp_pioc_init,
+       .fini = nvd0_disp_pioc_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+/* Create the client-visible display object: a parent exposing the EVO
+ * channel classes plus a 0x1000-entry RAMHT for object handles. */
+static int
+nvd0_disp_base_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nv50_disp_base *base;
+       int ret;
+
+       ret = nouveau_parent_create(parent, engine, oclass, 0,
+                                   priv->sclass, 0, &base);
+       *pobject = nv_object(base);
+       if (ret)
+               return ret;
+
+       return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+/* Drop the RAMHT reference and destroy the parent object. */
+static void
+nvd0_disp_base_dtor(struct nouveau_object *object)
+{
+       struct nv50_disp_base *base = (void *)object;
+       nouveau_ramht_ref(NULL, &base->ramht);
+       nouveau_parent_destroy(&base->base);
+}
+
+/* Initialise the display core: mirror per-unit capability registers into
+ * the EVO-visible area, take ownership of the display from the VBIOS,
+ * point the hardware at the hash table, and set up interrupts. */
+static int
+nvd0_disp_base_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_base *base = (void *)object;
+       int ret, i;
+       u32 tmp;
+
+       ret = nouveau_parent_init(&base->base);
+       if (ret)
+               return ret;
+
+       /* The below segments of code copying values from one register to
+        * another appear to inform EVO of the display capabilities or
+        * something similar.
+        */
+
+       /* ... CRTC caps */
+       for (i = 0; i < priv->head.nr; i++) {
+               tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+               nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
+               tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+               nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
+               tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+               nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
+       }
+
+       /* ... DAC caps */
+       for (i = 0; i < priv->dac.nr; i++) {
+               tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+               nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
+       }
+
+       /* ... SOR caps */
+       for (i = 0; i < priv->sor.nr; i++) {
+               tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+               nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
+       }
+
+       /* steal display away from vbios, or something like that */
+       if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
+               nv_wr32(priv, 0x6100ac, 0x00000100);
+               nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+               if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+                       nv_error(priv, "timeout acquiring display\n");
+                       return -EBUSY;
+               }
+       }
+
+       /* point at display engine memory area (hash table, objects) */
+       nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
+
+       /* enable supervisor interrupts, disable everything else */
+       nv_wr32(priv, 0x610090, 0x00000000);
+       nv_wr32(priv, 0x6100a0, 0x00000000);
+       nv_wr32(priv, 0x6100b0, 0x00000307);
+
+       return 0;
+}
+
+/* Quiesce the display core by masking all interrupts, then finalise the
+ * parent object. */
+static int
+nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_base *base = (void *)object;
+
+       /* disable all interrupts */
+       nv_wr32(priv, 0x6100b0, 0x00000000);
+
+       return nouveau_parent_fini(&base->base, suspend);
+}
+
+/* Object functions for the base display object. */
+struct nouveau_ofuncs
+nvd0_disp_base_ofuncs = {
+       .ctor = nvd0_disp_base_ctor,
+       .dtor = nvd0_disp_base_dtor,
+       .init = nvd0_disp_base_init,
+       .fini = nvd0_disp_base_fini,
+};
+
+/* Client-visible class list; method table shared with NVA3. */
+static struct nouveau_oclass
+nvd0_disp_base_oclass[] = {
+       { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+       {}
 };
 
 static struct nouveau_oclass
 nvd0_disp_sclass[] = {
-       {},
+       { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+       { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+       { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+       { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+       { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+       {}
 };
 
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+/* Resolve an output (DAC 0-3, SOR thereafter) and its method-control word
+ * to a DCB entry and VBIOS output table.  Returns the output table pointer,
+ * or 0x0000 if no match; *dcb, *ver, *hdr, *cnt, *len and *info are filled
+ * on success. */
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+           struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+           struct nvbios_outp *info)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       u16 mask, type, data;
+
+       if (outp < 4) {
+               type = DCB_OUTPUT_ANALOG;
+               mask = 0;
+       } else {
+               /* SOR: decode output type and link from the control word */
+               outp -= 4;
+               switch (ctrl & 0x00000f00) {
+               case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+               case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+               case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+               case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+               case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+               case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+               default:
+                       nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+                       return 0x0000;
+               }
+               dcb->sorconf.link = mask;
+       }
+
+       /* build the DCB match mask: link bits, output index, head */
+       mask  = 0x00c0 & (mask << 6);
+       mask |= 0x0001 << outp;
+       mask |= 0x0100 << head;
+
+       data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+       if (!data)
+               return 0x0000;
+
+       return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+/* Execute VBIOS display script <id> for the given output.  Returns true
+ * if the output was found and the script executed successfully. */
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_outp info;
+       struct dcb_output dcb;
+       u8  ver, hdr, cnt, len;
+       u16 data;
+
+       data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+       if (data) {
+               struct nvbios_init init = {
+                       .subdev = nv_subdev(priv),
+                       .bios = bios,
+                       .offset = info.script[id],
+                       .outp = &dcb,
+                       .crtc = head,
+                       .execute = 1,
+               };
+
+               return nvbios_exec(&init) == 0;
+       }
+
+       return false;
+}
+
+/* Execute the VBIOS clock-comparison script <id> matching the output's
+ * configuration and pixel clock.  Returns the configuration value used,
+ * or 0x0000 on failure. */
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
+           u32 ctrl, int id, u32 pclk)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_outp info1;
+       struct nvbios_ocfg info2;
+       struct dcb_output dcb;
+       u8  ver, hdr, cnt, len;
+       u16 data, conf;
+
+       data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1);
+       if (data == 0x0000)
+               return 0x0000;  /* was "return false", inconsistent with the u32 return type */
+
+       /* select the configuration value used to match an output config */
+       switch (dcb.type) {
+       case DCB_OUTPUT_TMDS:
+               conf = (ctrl & 0x00000f00) >> 8;
+               if (pclk >= 165000)
+                       conf |= 0x0100; /* dual-link threshold */
+               break;
+       case DCB_OUTPUT_LVDS:
+               conf = priv->sor.lvdsconf;
+               break;
+       case DCB_OUTPUT_DP:
+               conf = (ctrl & 0x00000f00) >> 8;
+               break;
+       case DCB_OUTPUT_ANALOG:
+       default:
+               conf = 0x00ff;
+               break;
+       }
+
+       data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+       if (data) {
+               data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+               if (data) {
+                       struct nvbios_init init = {
+                               .subdev = nv_subdev(priv),
+                               .bios = bios,
+                               .offset = data,
+                               .outp = &dcb,
+                               .crtc = head,
+                               .execute = 1,
+                       };
+
+                       if (nvbios_exec(&init))
+                               return 0x0000;
+                       return conf;
+               }
+       }
+
+       return 0x0000;
+}
+
+/* Supervisor stage 1: run VBIOS script 1 for every output control channel
+ * driving this head, then acknowledge the supervisor request. */
+static void
+nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+       int i;
+
+       for (i = 0; mask && i < 8; i++) {
+               u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+               if (mcc & (1 << head))
+                       exec_script(priv, head, i, mcc, 1);
+       }
+
+       /* ack supervisor -- NOTE(review): register meanings inferred from
+        * usage here; confirm against hw documentation */
+       nv_wr32(priv, 0x6101d4, 0x00000000);
+       nv_wr32(priv, 0x6109d4, 0x00000000);
+       nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
 static void
-nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
+nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
+{
+       /* Compute the DP transfer-unit parameter for this head/SOR from the
+        * pixel clock, bpp, lane count and per-lane link rate. */
+       const u32 ctrl = nv_rd32(priv, 0x660200 + (or   * 0x020));
+       const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
+       const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+       const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+       const u32 hoff = (head * 0x800);
+       const u32 soff = (  or * 0x800);
+       const u32 loff = (link * 0x080) + soff;
+       const u32 symbol = 100000;      /* fixed-point scale factor */
+       const u32 TU = 64;
+       u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+       u32 clksor = nv_rd32(priv, 0x612300 + soff);
+       u32 datarate, link_nr, link_bw, bits;
+       u64 ratio, value;
+
+       /* bits per pixel from the head configuration */
+       if      ((conf & 0x3c0) == 0x180) bits = 30;
+       else if ((conf & 0x3c0) == 0x140) bits = 24;
+       else                              bits = 18;
+       datarate = (pclk * bits) / 8;   /* payload bytes per second */
+
+       /* enabled lane count from the DP control register */
+       if      (dpctrl > 0x00030000) link_nr = 4;
+       else if (dpctrl > 0x00010000) link_nr = 2;
+       else                          link_nr = 1;
+
+       /* per-lane link rate in units of 27MHz */
+       link_bw  = (clksor & 0x007c0000) >> 18;
+       link_bw *= 27000;
+
+       /* ratio of payload to total link bandwidth, scaled by 'symbol';
+        * do_div keeps the 64-bit math safe on 32-bit platforms */
+       ratio  = datarate;
+       ratio *= symbol;
+       do_div(ratio, link_nr * link_bw);
+
+       value  = (symbol - ratio) * TU;
+       value *= ratio;
+       do_div(value, symbol);
+       do_div(value, symbol);
+
+       value += 5;
+       value |= 0x08000000;
+
+       nv_wr32(priv, 0x616610 + hoff, value);
+}
+
+/* Supervisor stage 2: run script 2 for active outputs, reprogram the head
+ * pixel clock, execute the clock-comparison scripts and update the
+ * output-path routing registers, then acknowledge. */
+static void
+nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+       u32 pclk;
+       int i;
+
+       for (i = 0; mask && i < 8; i++) {
+               u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+               if (mcc & (1 << head))
+                       exec_script(priv, head, i, mcc, 2);
+       }
+
+       pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+       nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask);
+       if (pclk && (mask & 0x00010000)) {
+               struct nouveau_clock *clk = nouveau_clock(priv);
+               clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+       }
+
+       nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
+
+       for (i = 0; mask && i < 8; i++) {
+               u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg;
+               if (mcp & (1 << head)) {
+                       if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) {
+                               /* NOTE(review): this 'mask' shadows the
+                                * function parameter of the same name */
+                               u32 addr, mask, data = 0x00000000;
+                               if (i < 4) {
+                                       /* DAC output path */
+                                       addr = 0x612280 + ((i - 0) * 0x800);
+                                       mask = 0xffffffff;
+                               } else {
+                                       switch (mcp & 0x00000f00) {
+                                       case 0x00000800:
+                                       case 0x00000900:
+                                               /* DP output: program TU */
+                                               nvd0_display_unk2_calc_tu(priv, head, i - 4);
+                                               break;
+                                       default:
+                                               break;
+                                       }
+
+                                       /* SOR output path */
+                                       addr = 0x612300 + ((i - 4) * 0x800);
+                                       mask = 0x00000707;
+                                       if (cfg & 0x00000100)
+                                               data = 0x00000101;
+                               }
+                               nv_mask(priv, addr, mask, data);
+                       }
+                       break;
+               }
+       }
+
+       /* ack supervisor */
+       nv_wr32(priv, 0x6101d4, 0x00000000);
+       nv_wr32(priv, 0x6109d4, 0x00000000);
+       nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+/* Supervisor stage 4: run the second clock-comparison script for active
+ * outputs, then acknowledge. */
+static void
+nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+       int pclk, i;
+
+       pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+
+       for (i = 0; mask && i < 8; i++) {
+               u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20));
+               if (mcp & (1 << head))
+                       exec_clkcmp(priv, head, i, mcp, 1, pclk);
+       }
+
+       /* ack supervisor */
+       nv_wr32(priv, 0x6101d4, 0x00000000);
+       nv_wr32(priv, 0x6109d4, 0x00000000);
+       nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
 {
        struct nouveau_bar *bar = nouveau_bar(priv);
        struct nouveau_disp *disp = &priv->base;
@@ -65,14 +851,71 @@ nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
                disp->vblank.notify(disp->vblank.data, crtc);
 }
 
-static void
+void
 nvd0_disp_intr(struct nouveau_subdev *subdev)
 {
-       struct nvd0_disp_priv *priv = (void *)subdev;
+       struct nv50_disp_priv *priv = (void *)subdev;
        u32 intr = nv_rd32(priv, 0x610088);
        int i;
 
-       for (i = 0; i < 4; i++) {
+       if (intr & 0x00000001) {
+               u32 stat = nv_rd32(priv, 0x61008c);
+               nv_wr32(priv, 0x61008c, stat);
+               intr &= ~0x00000001;
+       }
+
+       if (intr & 0x00000002) {
+               u32 stat = nv_rd32(priv, 0x61009c);
+               int chid = ffs(stat) - 1;
+               if (chid >= 0) {
+                       u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
+                       u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
+                       u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
+
+                       nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
+                                      "0x%08x 0x%08x\n",
+                                chid, (mthd & 0x0000ffc), data, mthd, unkn);
+                       nv_wr32(priv, 0x61009c, (1 << chid));
+                       nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
+               }
+
+               intr &= ~0x00000002;
+       }
+
+       if (intr & 0x00100000) {
+               u32 stat = nv_rd32(priv, 0x6100ac);
+               u32 mask = 0, crtc = ~0;
+
+               while (!mask && ++crtc < priv->head.nr)
+                       mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800));
+
+               if (stat & 0x00000001) {
+                       nv_wr32(priv, 0x6100ac, 0x00000001);
+                       nvd0_display_unk1_handler(priv, crtc, mask);
+                       stat &= ~0x00000001;
+               }
+
+               if (stat & 0x00000002) {
+                       nv_wr32(priv, 0x6100ac, 0x00000002);
+                       nvd0_display_unk2_handler(priv, crtc, mask);
+                       stat &= ~0x00000002;
+               }
+
+               if (stat & 0x00000004) {
+                       nv_wr32(priv, 0x6100ac, 0x00000004);
+                       nvd0_display_unk4_handler(priv, crtc, mask);
+                       stat &= ~0x00000004;
+               }
+
+               if (stat) {
+                       nv_info(priv, "unknown intr24 0x%08x\n", stat);
+                       nv_wr32(priv, 0x6100ac, stat);
+               }
+
+               intr &= ~0x00100000;
+       }
+
+       for (i = 0; i < priv->head.nr; i++) {
                u32 mask = 0x01000000 << i;
                if (mask & intr) {
                        u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
@@ -86,10 +929,10 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
 
 static int
 nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                 struct nouveau_oclass *oclass, void *data, u32 size,
-                 struct nouveau_object **pobject)
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
 {
-       struct nvd0_disp_priv *priv;
+       struct nv50_disp_priv *priv;
        int ret;
 
        ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
@@ -98,8 +941,23 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = nvd0_disp_sclass;
+       nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nvd0_disp_intr;
+       priv->sclass = nvd0_disp_sclass;
+       priv->head.nr = nv_rd32(priv, 0x022448);
+       priv->dac.nr = 3;
+       priv->sor.nr = 4;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hda_eld = nvd0_hda_eld;
+       priv->sor.hdmi = nvd0_hdmi_ctrl;
+       priv->sor.dp_train = nvd0_sor_dp_train;
+       priv->sor.dp_train_init = nv94_sor_dp_train_init;
+       priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+       priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+       priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
 
        INIT_LIST_HEAD(&priv->base.vblank.list);
        spin_lock_init(&priv->base.vblank.lock);
@@ -108,7 +966,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 
 struct nouveau_oclass
 nvd0_disp_oclass = {
-       .handle = NV_ENGINE(DISP, 0xd0),
+       .handle = NV_ENGINE(DISP, 0x90),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvd0_disp_ctor,
                .dtor = _nouveau_disp_dtor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
new file mode 100644 (file)
index 0000000..259537c
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nve0_disp_sclass[] = {
+       { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+       { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+       { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+       { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+       { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+       {}
+};
+
+static struct nouveau_oclass
+nve0_disp_base_oclass[] = {
+       { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+       {}
+};
+
+static int
+nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv;
+       int ret;
+
+       ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+                                 "display", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nve0_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
+       nv_subdev(priv)->intr = nvd0_disp_intr;
+       priv->sclass = nve0_disp_sclass;
+       priv->head.nr = nv_rd32(priv, 0x022448);
+       priv->dac.nr = 3;
+       priv->sor.nr = 4;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hda_eld = nvd0_hda_eld;
+       priv->sor.hdmi = nvd0_hdmi_ctrl;
+       priv->sor.dp_train = nvd0_sor_dp_train;
+       priv->sor.dp_train_init = nv94_sor_dp_train_init;
+       priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+       priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+       priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
+
+       INIT_LIST_HEAD(&priv->base.vblank.list);
+       spin_lock_init(&priv->base.vblank.lock);
+       return 0;
+}
+
+struct nouveau_oclass
+nve0_disp_oclass = {
+       .handle = NV_ENGINE(DISP, 0x91),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nve0_disp_ctor,
+               .dtor = _nouveau_disp_dtor,
+               .init = _nouveau_disp_init,
+               .fini = _nouveau_disp_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
new file mode 100644 (file)
index 0000000..39b6b67
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+       const u32 stat = data & NV50_DISP_SOR_PWR_STATE;
+       const u32 soff = (or * 0x800);
+       nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+       nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
+       nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+       nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
+       return 0;
+}
+
+int
+nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
+       const u8  head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
+       const u8  link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
+       const u8    or = (mthd & NV50_DISP_SOR_MTHD_OR);
+       const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
+       struct dcb_output outp;
+       u8  ver, hdr;
+       u32 data;
+       int ret = -EINVAL;
+
+       if (size < sizeof(u32))
+               return -EINVAL;
+       data = *(u32 *)args;
+
+       if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
+               return -ENODEV;
+
+       switch (mthd & ~0x3f) {
+       case NV50_DISP_SOR_PWR:
+               ret = priv->sor.power(priv, or, data);
+               break;
+       case NVA3_DISP_SOR_HDA_ELD:
+               ret = priv->sor.hda_eld(priv, or, args, size);
+               break;
+       case NV84_DISP_SOR_HDMI_PWR:
+               ret = priv->sor.hdmi(priv, head, or, data);
+               break;
+       case NV50_DISP_SOR_LVDS_SCRIPT:
+               priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
+               ret = 0;
+               break;
+       case NV94_DISP_SOR_DP_TRAIN:
+               switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
+               case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
+                       ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
+                       break;
+               case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
+                       ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
+                       break;
+               case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
+                       ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case NV94_DISP_SOR_DP_LNKCTL:
+               ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
+               break;
+       case NV94_DISP_SOR_DP_DRVCTL(0):
+       case NV94_DISP_SOR_DP_DRVCTL(1):
+       case NV94_DISP_SOR_DP_DRVCTL(2):
+       case NV94_DISP_SOR_DP_DRVCTL(3):
+               ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
+                                         type, mask, data, &outp);
+               break;
+       default:
+               BUG_ON(1);
+       }
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
new file mode 100644 (file)
index 0000000..f6edd00
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+       static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+       static const u8 nv94[] = { 16, 8, 0, 24 };
+       if (nv_device(priv)->chipset == 0xaf)
+               return nvaf[lane];
+       return nv94[lane];
+}
+
+int
+nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head,
+                      u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_dpout info;
+       u8  ver, hdr, cnt, len;
+       u16 outp;
+
+       outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+       if (outp) {
+               struct nvbios_init init = {
+                       .subdev = nv_subdev(priv),
+                       .bios = bios,
+                       .outp = dcbo,
+                       .crtc = head,
+                       .execute = 1,
+               };
+
+               if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
+                       init.offset = info.script[2];
+               else
+                       init.offset = info.script[3];
+               nvbios_exec(&init);
+
+               init.offset = info.script[0];
+               nvbios_exec(&init);
+       }
+
+       return 0;
+}
+
+int
+nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head,
+                      u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_dpout info;
+       u8  ver, hdr, cnt, len;
+       u16 outp;
+
+       outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+       if (outp) {
+               struct nvbios_init init = {
+                       .subdev = nv_subdev(priv),
+                       .bios = bios,
+                       .offset = info.script[1],
+                       .outp = dcbo,
+                       .crtc = head,
+                       .execute = 1,
+               };
+
+               nvbios_exec(&init);
+       }
+
+       return 0;
+}
+
+int
+nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+                 u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+       nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24);
+       return 0;
+}
+
+int
+nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+                  u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u32 soff = (or * 0x800);
+       u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+       u8  link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+       u32 dpctrl = 0x00000000;
+       u32 clksor = 0x00000000;
+       u32 outp, lane = 0;
+       u8  ver, hdr, cnt, len;
+       struct nvbios_dpout info;
+       int i;
+
+       /* -> 10Khz units */
+       link_bw *= 2700;
+
+       outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+       if (outp && info.lnkcmp) {
+               struct nvbios_init init = {
+                       .subdev = nv_subdev(priv),
+                       .bios = bios,
+                       .offset = 0x0000,
+                       .outp = dcbo,
+                       .crtc = head,
+                       .execute = 1,
+               };
+
+               while (link_bw < nv_ro16(bios, info.lnkcmp))
+                       info.lnkcmp += 4;
+               init.offset = nv_ro16(bios, info.lnkcmp + 2);
+
+               nvbios_exec(&init);
+       }
+
+       dpctrl |= ((1 << link_nr) - 1) << 16;
+       if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+               dpctrl |= 0x00004000;
+       if (link_bw > 16200)
+               clksor |= 0x00040000;
+
+       for (i = 0; i < link_nr; i++)
+               lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
+
+       nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
+       nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+       nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+       return 0;
+}
+
+int
+nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+                  u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+       const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+       u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
+       u8  ver, hdr, cnt, len;
+       struct nvbios_dpout outp;
+       struct nvbios_dpcfg ocfg;
+
+       addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+       if (!addr)
+               return -ENODEV;
+
+       addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+       if (!addr)
+               return -EINVAL;
+
+       nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+       nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+       nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
new file mode 100644 (file)
index 0000000..c37ce7e
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+       static const u8 nvd0[] = { 16, 8, 0, 24 };
+       return nvd0[lane];
+}
+
+int
+nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+                 u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+       nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt);
+       return 0;
+}
+
+int
+nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+                  u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u32 soff = (or * 0x800);
+       const u8  link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+       const u8  link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+       u32 dpctrl = 0x00000000;
+       u32 clksor = 0x00000000;
+       u32 outp, lane = 0;
+       u8  ver, hdr, cnt, len;
+       struct nvbios_dpout info;
+       int i;
+
+       outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+       if (outp && info.lnkcmp) {
+               struct nvbios_init init = {
+                       .subdev = nv_subdev(priv),
+                       .bios = bios,
+                       .offset = 0x0000,
+                       .outp = dcbo,
+                       .crtc = head,
+                       .execute = 1,
+               };
+
+               while (nv_ro08(bios, info.lnkcmp) < link_bw)
+                       info.lnkcmp += 3;
+               init.offset = nv_ro16(bios, info.lnkcmp + 1);
+
+               nvbios_exec(&init);
+       }
+
+       clksor |= link_bw << 18;
+       dpctrl |= ((1 << link_nr) - 1) << 16;
+       if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+               dpctrl |= 0x00004000;
+
+       for (i = 0; i < link_nr; i++)
+               lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3);
+
+       nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
+       nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+       nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+       return 0;
+}
+
+int
+nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+                  u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+       const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+       u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
+       u8  ver, hdr, cnt, len;
+       struct nvbios_dpout outp;
+       struct nvbios_dpcfg ocfg;
+
+       addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+       if (!addr)
+               return -ENODEV;
+
+       addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+       if (!addr)
+               return -EINVAL;
+
+       nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+       nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+       nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+       nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
+       return 0;
+}
index e1f013d..5103e88 100644 (file)
 #include <subdev/fb.h>
 #include <engine/dmaobj.h>
 
-int
-nouveau_dmaobj_create_(struct nouveau_object *parent,
-                      struct nouveau_object *engine,
-                      struct nouveau_oclass *oclass,
-                      void *data, u32 size, int len, void **pobject)
+static int
+nouveau_dmaobj_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
 {
+       struct nouveau_dmaeng *dmaeng = (void *)engine;
+       struct nouveau_dmaobj *dmaobj;
+       struct nouveau_gpuobj *gpuobj;
        struct nv_dma_class *args = data;
-       struct nouveau_dmaobj *object;
        int ret;
 
        if (size < sizeof(*args))
                return -EINVAL;
 
-       ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
-       object = *pobject;
+       ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
+       *pobject = nv_object(dmaobj);
        if (ret)
                return ret;
 
        switch (args->flags & NV_DMA_TARGET_MASK) {
        case NV_DMA_TARGET_VM:
-               object->target = NV_MEM_TARGET_VM;
+               dmaobj->target = NV_MEM_TARGET_VM;
                break;
        case NV_DMA_TARGET_VRAM:
-               object->target = NV_MEM_TARGET_VRAM;
+               dmaobj->target = NV_MEM_TARGET_VRAM;
                break;
        case NV_DMA_TARGET_PCI:
-               object->target = NV_MEM_TARGET_PCI;
+               dmaobj->target = NV_MEM_TARGET_PCI;
                break;
        case NV_DMA_TARGET_PCI_US:
        case NV_DMA_TARGET_AGP:
-               object->target = NV_MEM_TARGET_PCI_NOSNOOP;
+               dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
                break;
        default:
                return -EINVAL;
@@ -66,22 +68,53 @@ nouveau_dmaobj_create_(struct nouveau_object *parent,
 
        switch (args->flags & NV_DMA_ACCESS_MASK) {
        case NV_DMA_ACCESS_VM:
-               object->access = NV_MEM_ACCESS_VM;
+               dmaobj->access = NV_MEM_ACCESS_VM;
                break;
        case NV_DMA_ACCESS_RD:
-               object->access = NV_MEM_ACCESS_RO;
+               dmaobj->access = NV_MEM_ACCESS_RO;
                break;
        case NV_DMA_ACCESS_WR:
-               object->access = NV_MEM_ACCESS_WO;
+               dmaobj->access = NV_MEM_ACCESS_WO;
                break;
        case NV_DMA_ACCESS_RDWR:
-               object->access = NV_MEM_ACCESS_RW;
+               dmaobj->access = NV_MEM_ACCESS_RW;
                break;
        default:
                return -EINVAL;
        }
 
-       object->start = args->start;
-       object->limit = args->limit;
-       return 0;
+       dmaobj->start = args->start;
+       dmaobj->limit = args->limit;
+       dmaobj->conf0 = args->conf0;
+
+       switch (nv_mclass(parent)) {
+       case NV_DEVICE_CLASS:
+               /* delayed, or no, binding */
+               break;
+       default:
+               ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
+               if (ret == 0) {
+                       nouveau_object_ref(NULL, pobject);
+                       *pobject = nv_object(gpuobj);
+               }
+               break;
+       }
+
+       return ret;
 }
+
+static struct nouveau_ofuncs
+nouveau_dmaobj_ofuncs = {
+       .ctor = nouveau_dmaobj_ctor,
+       .dtor = nouveau_object_destroy,
+       .init = nouveau_object_init,
+       .fini = nouveau_object_fini,
+};
+
+struct nouveau_oclass
+nouveau_dmaobj_sclass[] = {
+       { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+       { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+       { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+       {}
+};
index 9f4cc2f..027d821 100644 (file)
@@ -34,10 +34,6 @@ struct nv04_dmaeng_priv {
        struct nouveau_dmaeng base;
 };
 
-struct nv04_dmaobj_priv {
-       struct nouveau_dmaobj base;
-};
-
 static int
 nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
                 struct nouveau_object *parent,
@@ -53,6 +49,18 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
        u32 length = dmaobj->limit - dmaobj->start;
        int ret;
 
+       if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+               switch (nv_mclass(parent->parent)) {
+               case NV03_CHANNEL_DMA_CLASS:
+               case NV10_CHANNEL_DMA_CLASS:
+               case NV17_CHANNEL_DMA_CLASS:
+               case NV40_CHANNEL_DMA_CLASS:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
        if (dmaobj->target == NV_MEM_TARGET_VM) {
                if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
                        struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
@@ -106,56 +114,6 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
 }
 
 static int
-nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                struct nouveau_oclass *oclass, void *data, u32 size,
-                struct nouveau_object **pobject)
-{
-       struct nouveau_dmaeng *dmaeng = (void *)engine;
-       struct nv04_dmaobj_priv *dmaobj;
-       struct nouveau_gpuobj *gpuobj;
-       int ret;
-
-       ret = nouveau_dmaobj_create(parent, engine, oclass,
-                                   data, size, &dmaobj);
-       *pobject = nv_object(dmaobj);
-       if (ret)
-               return ret;
-
-       switch (nv_mclass(parent)) {
-       case NV_DEVICE_CLASS:
-               break;
-       case NV03_CHANNEL_DMA_CLASS:
-       case NV10_CHANNEL_DMA_CLASS:
-       case NV17_CHANNEL_DMA_CLASS:
-       case NV40_CHANNEL_DMA_CLASS:
-               ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
-               nouveau_object_ref(NULL, pobject);
-               *pobject = nv_object(gpuobj);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return ret;
-}
-
-static struct nouveau_ofuncs
-nv04_dmaobj_ofuncs = {
-       .ctor = nv04_dmaobj_ctor,
-       .dtor = _nouveau_dmaobj_dtor,
-       .init = _nouveau_dmaobj_init,
-       .fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv04_dmaobj_sclass[] = {
-       { 0x0002, &nv04_dmaobj_ofuncs },
-       { 0x0003, &nv04_dmaobj_ofuncs },
-       { 0x003d, &nv04_dmaobj_ofuncs },
-       {}
-};
-
-static int
 nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                 struct nouveau_oclass *oclass, void *data, u32 size,
                 struct nouveau_object **pobject)
@@ -168,7 +126,7 @@ nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       priv->base.base.sclass = nv04_dmaobj_sclass;
+       nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
        priv->base.bind = nv04_dmaobj_bind;
        return 0;
 }
index 045d256..750183f 100644 (file)
@@ -32,36 +32,74 @@ struct nv50_dmaeng_priv {
        struct nouveau_dmaeng base;
 };
 
-struct nv50_dmaobj_priv {
-       struct nouveau_dmaobj base;
-};
-
 static int
 nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
                 struct nouveau_object *parent,
                 struct nouveau_dmaobj *dmaobj,
                 struct nouveau_gpuobj **pgpuobj)
 {
-       u32 flags = nv_mclass(dmaobj);
+       u32 flags0 = nv_mclass(dmaobj);
+       u32 flags5 = 0x00000000;
        int ret;
 
+       if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+               switch (nv_mclass(parent->parent)) {
+               case NV50_CHANNEL_DMA_CLASS:
+               case NV84_CHANNEL_DMA_CLASS:
+               case NV50_CHANNEL_IND_CLASS:
+               case NV84_CHANNEL_IND_CLASS:
+               case NV50_DISP_MAST_CLASS:
+               case NV84_DISP_MAST_CLASS:
+               case NV94_DISP_MAST_CLASS:
+               case NVA0_DISP_MAST_CLASS:
+               case NVA3_DISP_MAST_CLASS:
+               case NV50_DISP_SYNC_CLASS:
+               case NV84_DISP_SYNC_CLASS:
+               case NV94_DISP_SYNC_CLASS:
+               case NVA0_DISP_SYNC_CLASS:
+               case NVA3_DISP_SYNC_CLASS:
+               case NV50_DISP_OVLY_CLASS:
+               case NV84_DISP_OVLY_CLASS:
+               case NV94_DISP_OVLY_CLASS:
+               case NVA0_DISP_OVLY_CLASS:
+               case NVA3_DISP_OVLY_CLASS:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) {
+               if (dmaobj->target == NV_MEM_TARGET_VM) {
+                       dmaobj->conf0  = NV50_DMA_CONF0_PRIV_VM;
+                       dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM;
+                       dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM;
+                       dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM;
+               } else {
+                       dmaobj->conf0  = NV50_DMA_CONF0_PRIV_US;
+                       dmaobj->conf0 |= NV50_DMA_CONF0_PART_256;
+                       dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE;
+                       dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
+               }
+       }
+
+       flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22;
+       flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22;
+       flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV);
+       flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART);
+
        switch (dmaobj->target) {
        case NV_MEM_TARGET_VM:
-               flags |= 0x00000000;
-               flags |= 0x60000000; /* COMPRESSION_USEVM */
-               flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
+               flags0 |= 0x00000000;
                break;
        case NV_MEM_TARGET_VRAM:
-               flags |= 0x00010000;
-               flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+               flags0 |= 0x00010000;
                break;
        case NV_MEM_TARGET_PCI:
-               flags |= 0x00020000;
-               flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+               flags0 |= 0x00020000;
                break;
        case NV_MEM_TARGET_PCI_NOSNOOP:
-               flags |= 0x00030000;
-               flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+               flags0 |= 0x00030000;
                break;
        default:
                return -EINVAL;
@@ -71,79 +109,29 @@ nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
        case NV_MEM_ACCESS_VM:
                break;
        case NV_MEM_ACCESS_RO:
-               flags |= 0x00040000;
+               flags0 |= 0x00040000;
                break;
        case NV_MEM_ACCESS_WO:
        case NV_MEM_ACCESS_RW:
-               flags |= 0x00080000;
+               flags0 |= 0x00080000;
                break;
        }
 
        ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
        if (ret == 0) {
-               nv_wo32(*pgpuobj, 0x00, flags);
+               nv_wo32(*pgpuobj, 0x00, flags0);
                nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
                nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
                nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
                                        upper_32_bits(dmaobj->start));
                nv_wo32(*pgpuobj, 0x10, 0x00000000);
-               nv_wo32(*pgpuobj, 0x14, 0x00000000);
+               nv_wo32(*pgpuobj, 0x14, flags5);
        }
 
        return ret;
 }
 
 static int
-nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                struct nouveau_oclass *oclass, void *data, u32 size,
-                struct nouveau_object **pobject)
-{
-       struct nouveau_dmaeng *dmaeng = (void *)engine;
-       struct nv50_dmaobj_priv *dmaobj;
-       struct nouveau_gpuobj *gpuobj;
-       int ret;
-
-       ret = nouveau_dmaobj_create(parent, engine, oclass,
-                                   data, size, &dmaobj);
-       *pobject = nv_object(dmaobj);
-       if (ret)
-               return ret;
-
-       switch (nv_mclass(parent)) {
-       case NV_DEVICE_CLASS:
-               break;
-       case NV50_CHANNEL_DMA_CLASS:
-       case NV84_CHANNEL_DMA_CLASS:
-       case NV50_CHANNEL_IND_CLASS:
-       case NV84_CHANNEL_IND_CLASS:
-               ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
-               nouveau_object_ref(NULL, pobject);
-               *pobject = nv_object(gpuobj);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return ret;
-}
-
-static struct nouveau_ofuncs
-nv50_dmaobj_ofuncs = {
-       .ctor = nv50_dmaobj_ctor,
-       .dtor = _nouveau_dmaobj_dtor,
-       .init = _nouveau_dmaobj_init,
-       .fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv50_dmaobj_sclass[] = {
-       { 0x0002, &nv50_dmaobj_ofuncs },
-       { 0x0003, &nv50_dmaobj_ofuncs },
-       { 0x003d, &nv50_dmaobj_ofuncs },
-       {}
-};
-
-static int
 nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                 struct nouveau_oclass *oclass, void *data, u32 size,
                 struct nouveau_object **pobject)
@@ -156,7 +144,7 @@ nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       priv->base.base.sclass = nv50_dmaobj_sclass;
+       nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
        priv->base.bind = nv50_dmaobj_bind;
        return 0;
 }
index 5baa086..cd3970d 100644 (file)
@@ -22,7 +22,9 @@
  * Authors: Ben Skeggs
  */
 
+#include <core/device.h>
 #include <core/gpuobj.h>
+#include <core/class.h>
 
 #include <subdev/fb.h>
 #include <engine/dmaobj.h>
@@ -31,44 +33,85 @@ struct nvc0_dmaeng_priv {
        struct nouveau_dmaeng base;
 };
 
-struct nvc0_dmaobj_priv {
-       struct nouveau_dmaobj base;
-};
-
 static int
-nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                struct nouveau_oclass *oclass, void *data, u32 size,
-                struct nouveau_object **pobject)
+nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+                struct nouveau_object *parent,
+                struct nouveau_dmaobj *dmaobj,
+                struct nouveau_gpuobj **pgpuobj)
 {
-       struct nvc0_dmaobj_priv *dmaobj;
+       u32 flags0 = nv_mclass(dmaobj);
+       u32 flags5 = 0x00000000;
        int ret;
 
-       ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
-       *pobject = nv_object(dmaobj);
-       if (ret)
-               return ret;
+       if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+               switch (nv_mclass(parent->parent)) {
+               case NVA3_DISP_MAST_CLASS:
+               case NVA3_DISP_SYNC_CLASS:
+               case NVA3_DISP_OVLY_CLASS:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       } else
+               return 0;
+
+       if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
+               if (dmaobj->target == NV_MEM_TARGET_VM) {
+                       dmaobj->conf0  = NVC0_DMA_CONF0_PRIV_VM;
+                       dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
+               } else {
+                       dmaobj->conf0  = NVC0_DMA_CONF0_PRIV_US;
+                       dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
+                       dmaobj->conf0 |= 0x00020000;
+               }
+       }
 
-       if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
+       flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
+       flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
+       flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);
+
+       switch (dmaobj->target) {
+       case NV_MEM_TARGET_VM:
+               flags0 |= 0x00000000;
+               break;
+       case NV_MEM_TARGET_VRAM:
+               flags0 |= 0x00010000;
+               break;
+       case NV_MEM_TARGET_PCI:
+               flags0 |= 0x00020000;
+               break;
+       case NV_MEM_TARGET_PCI_NOSNOOP:
+               flags0 |= 0x00030000;
+               break;
+       default:
                return -EINVAL;
+       }
 
-       return 0;
-}
+       switch (dmaobj->access) {
+       case NV_MEM_ACCESS_VM:
+               break;
+       case NV_MEM_ACCESS_RO:
+               flags0 |= 0x00040000;
+               break;
+       case NV_MEM_ACCESS_WO:
+       case NV_MEM_ACCESS_RW:
+               flags0 |= 0x00080000;
+               break;
+       }
 
-static struct nouveau_ofuncs
-nvc0_dmaobj_ofuncs = {
-       .ctor = nvc0_dmaobj_ctor,
-       .dtor = _nouveau_dmaobj_dtor,
-       .init = _nouveau_dmaobj_init,
-       .fini = _nouveau_dmaobj_fini,
-};
+       ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+       if (ret == 0) {
+               nv_wo32(*pgpuobj, 0x00, flags0);
+               nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+               nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+               nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
+                                       upper_32_bits(dmaobj->start));
+               nv_wo32(*pgpuobj, 0x10, 0x00000000);
+               nv_wo32(*pgpuobj, 0x14, flags5);
+       }
 
-static struct nouveau_oclass
-nvc0_dmaobj_sclass[] = {
-       { 0x0002, &nvc0_dmaobj_ofuncs },
-       { 0x0003, &nvc0_dmaobj_ofuncs },
-       { 0x003d, &nvc0_dmaobj_ofuncs },
-       {}
-};
+       return ret;
+}
 
 static int
 nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -83,7 +126,8 @@ nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       priv->base.base.sclass = nvc0_dmaobj_sclass;
+       nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+       priv->base.bind = nvc0_dmaobj_bind;
        return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
new file mode 100644 (file)
index 0000000..d152875
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nvd0_dmaeng_priv {
+       struct nouveau_dmaeng base;
+};
+
+static int
+nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+                struct nouveau_object *parent,
+                struct nouveau_dmaobj *dmaobj,
+                struct nouveau_gpuobj **pgpuobj)
+{
+       u32 flags0 = 0x00000000;
+       int ret;
+
+       if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+               switch (nv_mclass(parent->parent)) {
+               case NVD0_DISP_MAST_CLASS:
+               case NVD0_DISP_SYNC_CLASS:
+               case NVD0_DISP_OVLY_CLASS:
+               case NVE0_DISP_MAST_CLASS:
+               case NVE0_DISP_SYNC_CLASS:
+               case NVE0_DISP_OVLY_CLASS:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       } else
+               return 0;
+
+       if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
+               if (dmaobj->target == NV_MEM_TARGET_VM) {
+                       dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
+                       dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
+               } else {
+                       dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
+                       dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
+               }
+       }
+
+       flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
+       flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
+
+       switch (dmaobj->target) {
+       case NV_MEM_TARGET_VRAM:
+               flags0 |= 0x00000009;
+               break;
+       default:
+               return -EINVAL;
+               break;
+       }
+
+       ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+       if (ret == 0) {
+               nv_wo32(*pgpuobj, 0x00, flags0);
+               nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8);
+               nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8);
+               nv_wo32(*pgpuobj, 0x0c, 0x00000000);
+               nv_wo32(*pgpuobj, 0x10, 0x00000000);
+               nv_wo32(*pgpuobj, 0x14, 0x00000000);
+       }
+
+       return ret;
+}
+
+static int
+nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+                struct nouveau_oclass *oclass, void *data, u32 size,
+                struct nouveau_object **pobject)
+{
+       struct nvd0_dmaeng_priv *priv;
+       int ret;
+
+       ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+       priv->base.bind = nvd0_dmaobj_bind;
+       return 0;
+}
+
+struct nouveau_oclass
+nvd0_dmaeng_oclass = {
+       .handle = NV_ENGINE(DMAOBJ, 0xd0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nvd0_dmaeng_ctor,
+               .dtor = _nouveau_dmaeng_dtor,
+               .init = _nouveau_dmaeng_init,
+               .fini = _nouveau_dmaeng_fini,
+       },
+};
index bbb43c6..c2b9db3 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <core/object.h>
 #include <core/handle.h>
+#include <core/class.h>
 
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
@@ -33,7 +34,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
                             struct nouveau_object *engine,
                             struct nouveau_oclass *oclass,
                             int bar, u32 addr, u32 size, u32 pushbuf,
-                            u32 engmask, int len, void **ptr)
+                            u64 engmask, int len, void **ptr)
 {
        struct nouveau_device *device = nv_device(engine);
        struct nouveau_fifo *priv = (void *)engine;
@@ -56,18 +57,16 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
 
        dmaeng = (void *)chan->pushdma->base.engine;
        switch (chan->pushdma->base.oclass->handle) {
-       case 0x0002:
-       case 0x003d:
+       case NV_DMA_FROM_MEMORY_CLASS:
+       case NV_DMA_IN_MEMORY_CLASS:
                break;
        default:
                return -EINVAL;
        }
 
-       if (dmaeng->bind) {
-               ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
-               if (ret)
-                       return ret;
-       }
+       ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+       if (ret)
+               return ret;
 
        /* find a free fifo channel */
        spin_lock_irqsave(&priv->lock, flags);
@@ -119,14 +118,14 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object)
 }
 
 u32
-_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
 {
        struct nouveau_fifo_chan *chan = (void *)object;
        return ioread32_native(chan->user + addr);
 }
 
 void
-_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        struct nouveau_fifo_chan *chan = (void *)object;
        iowrite32_native(data, chan->user + addr);
index ea76e3e..a47a854 100644 (file)
@@ -126,9 +126,9 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
                                          0x10000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
@@ -440,7 +440,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
                        }
 
                        if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
-                               nv_info(priv, "CACHE_ERROR - Ch %d/%d "
+                               nv_error(priv, "CACHE_ERROR - Ch %d/%d "
                                              "Mthd 0x%04x Data 0x%08x\n",
                                        chid, (mthd >> 13) & 7, mthd & 0x1ffc,
                                        data);
@@ -476,7 +476,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
                                u32 ib_get = nv_rd32(priv, 0x003334);
                                u32 ib_put = nv_rd32(priv, 0x003330);
 
-                               nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
+                               nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
                                     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
                                     "State 0x%08x (err: %s) Push 0x%08x\n",
                                        chid, ho_get, dma_get, ho_put,
@@ -494,7 +494,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
                                        nv_wr32(priv, 0x003334, ib_put);
                                }
                        } else {
-                               nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
+                               nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
                                             "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
                                        chid, dma_get, dma_put, state,
                                        nv_dma_state_err(state), push);
@@ -525,14 +525,13 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 
                if (device->card_type == NV_50) {
                        if (status & 0x00000010) {
-                               nv50_fb_trap(nouveau_fb(priv), 1);
                                status &= ~0x00000010;
                                nv_wr32(priv, 0x002100, 0x00000010);
                        }
                }
 
                if (status) {
-                       nv_info(priv, "unknown intr 0x%08x, ch %d\n",
+                       nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
                                status, chid);
                        nv_wr32(priv, NV03_PFIFO_INTR_0, status);
                        status = 0;
@@ -542,7 +541,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
        }
 
        if (status) {
-               nv_info(priv, "still angry after %d spins, halt\n", cnt);
+               nv_error(priv, "still angry after %d spins, halt\n", cnt);
                nv_wr32(priv, 0x002140, 0);
                nv_wr32(priv, 0x000140, 0);
        }
index 4ba7542..2c927c1 100644 (file)
@@ -69,9 +69,9 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
                                          0x10000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
index b96e6b0..a9cb51d 100644 (file)
@@ -74,10 +74,10 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
                                          0x10000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_MPEG), /* NV31- */
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
                                          &chan);
        *pobject = nv_object(chan);
        if (ret)
index 559c3b4..2b1f917 100644 (file)
@@ -192,10 +192,10 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
                                          0x1000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_MPEG), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_MPEG), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
index 536e763..bd09636 100644 (file)
@@ -112,14 +112,6 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                return -EINVAL;
        }
 
-       nv_wo32(base->eng, addr + 0x00, 0x00000000);
-       nv_wo32(base->eng, addr + 0x04, 0x00000000);
-       nv_wo32(base->eng, addr + 0x08, 0x00000000);
-       nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-       nv_wo32(base->eng, addr + 0x10, 0x00000000);
-       nv_wo32(base->eng, addr + 0x14, 0x00000000);
-       bar->flush(bar);
-
        /* HW bug workaround:
         *
         * PFIFO will hang forever if the connected engines don't report
@@ -141,8 +133,18 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                if (suspend)
                        ret = -EBUSY;
        }
-
        nv_wr32(priv, 0x00b860, me);
+
+       if (ret == 0) {
+               nv_wo32(base->eng, addr + 0x00, 0x00000000);
+               nv_wo32(base->eng, addr + 0x04, 0x00000000);
+               nv_wo32(base->eng, addr + 0x08, 0x00000000);
+               nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+               nv_wo32(base->eng, addr + 0x10, 0x00000000);
+               nv_wo32(base->eng, addr + 0x14, 0x00000000);
+               bar->flush(bar);
+       }
+
        return ret;
 }
 
@@ -194,10 +196,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
                                          0x2000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_MPEG), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_MPEG), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
@@ -247,10 +249,10 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
                                          0x2000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_MPEG), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_MPEG), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
index b4fd26d..1eb1c51 100644 (file)
@@ -95,14 +95,6 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                return -EINVAL;
        }
 
-       nv_wo32(base->eng, addr + 0x00, 0x00000000);
-       nv_wo32(base->eng, addr + 0x04, 0x00000000);
-       nv_wo32(base->eng, addr + 0x08, 0x00000000);
-       nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-       nv_wo32(base->eng, addr + 0x10, 0x00000000);
-       nv_wo32(base->eng, addr + 0x14, 0x00000000);
-       bar->flush(bar);
-
        save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
        nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
        done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
@@ -112,6 +104,14 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                if (suspend)
                        return -EBUSY;
        }
+
+       nv_wo32(base->eng, addr + 0x00, 0x00000000);
+       nv_wo32(base->eng, addr + 0x04, 0x00000000);
+       nv_wo32(base->eng, addr + 0x08, 0x00000000);
+       nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+       nv_wo32(base->eng, addr + 0x10, 0x00000000);
+       nv_wo32(base->eng, addr + 0x14, 0x00000000);
+       bar->flush(bar);
        return 0;
 }
 
@@ -163,17 +163,17 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
                                          0x2000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_MPEG) |
-                                         (1 << NVDEV_ENGINE_ME) |
-                                         (1 << NVDEV_ENGINE_VP) |
-                                         (1 << NVDEV_ENGINE_CRYPT) |
-                                         (1 << NVDEV_ENGINE_BSP) |
-                                         (1 << NVDEV_ENGINE_PPP) |
-                                         (1 << NVDEV_ENGINE_COPY0) |
-                                         (1 << NVDEV_ENGINE_UNK1C1), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_MPEG) |
+                                         (1ULL << NVDEV_ENGINE_ME) |
+                                         (1ULL << NVDEV_ENGINE_VP) |
+                                         (1ULL << NVDEV_ENGINE_CRYPT) |
+                                         (1ULL << NVDEV_ENGINE_BSP) |
+                                         (1ULL << NVDEV_ENGINE_PPP) |
+                                         (1ULL << NVDEV_ENGINE_COPY0) |
+                                         (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
@@ -225,17 +225,17 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
                                          0x2000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_MPEG) |
-                                         (1 << NVDEV_ENGINE_ME) |
-                                         (1 << NVDEV_ENGINE_VP) |
-                                         (1 << NVDEV_ENGINE_CRYPT) |
-                                         (1 << NVDEV_ENGINE_BSP) |
-                                         (1 << NVDEV_ENGINE_PPP) |
-                                         (1 << NVDEV_ENGINE_COPY0) |
-                                         (1 << NVDEV_ENGINE_UNK1C1), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_MPEG) |
+                                         (1ULL << NVDEV_ENGINE_ME) |
+                                         (1ULL << NVDEV_ENGINE_VP) |
+                                         (1ULL << NVDEV_ENGINE_CRYPT) |
+                                         (1ULL << NVDEV_ENGINE_BSP) |
+                                         (1ULL << NVDEV_ENGINE_PPP) |
+                                         (1ULL << NVDEV_ENGINE_COPY0) |
+                                         (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
index 6f21be6..b4365dd 100644 (file)
@@ -103,6 +103,9 @@ nvc0_fifo_context_attach(struct nouveau_object *parent,
        case NVDEV_ENGINE_GR   : addr = 0x0210; break;
        case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
        case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+       case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+       case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+       case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
        default:
                return -EINVAL;
        }
@@ -137,14 +140,13 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
        case NVDEV_ENGINE_GR   : addr = 0x0210; break;
        case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
        case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+       case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+       case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+       case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
        default:
                return -EINVAL;
        }
 
-       nv_wo32(base, addr + 0x00, 0x00000000);
-       nv_wo32(base, addr + 0x04, 0x00000000);
-       bar->flush(bar);
-
        nv_wr32(priv, 0x002634, chan->base.chid);
        if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
                nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -152,6 +154,9 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                        return -EBUSY;
        }
 
+       nv_wo32(base, addr + 0x00, 0x00000000);
+       nv_wo32(base, addr + 0x04, 0x00000000);
+       bar->flush(bar);
        return 0;
 }
 
@@ -175,10 +180,13 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
                                          priv->user.bar.offset, 0x1000,
                                          args->pushbuf,
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_COPY0) |
-                                         (1 << NVDEV_ENGINE_COPY1), &chan);
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_COPY0) |
+                                         (1ULL << NVDEV_ENGINE_COPY1) |
+                                         (1ULL << NVDEV_ENGINE_BSP) |
+                                         (1ULL << NVDEV_ENGINE_VP) |
+                                         (1ULL << NVDEV_ENGINE_PPP), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
@@ -494,7 +502,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
        u32 stat = nv_rd32(priv, 0x002100) & mask;
 
        if (stat & 0x00000100) {
-               nv_info(priv, "unknown status 0x00000100\n");
+               nv_warn(priv, "unknown status 0x00000100\n");
                nv_wr32(priv, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }
index 36e81b6..c930da9 100644 (file)
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
 
-#define _(a,b) { (a), ((1 << (a)) | (b)) }
+#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
 static const struct {
-       int subdev;
-       u32 mask;
+       u64 subdev;
+       u64 mask;
 } fifo_engine[] = {
-       _(NVDEV_ENGINE_GR      , (1 << NVDEV_ENGINE_SW)),
+       _(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW)),
        _(NVDEV_ENGINE_VP      , 0),
        _(NVDEV_ENGINE_PPP     , 0),
        _(NVDEV_ENGINE_BSP     , 0),
@@ -138,6 +138,9 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
        case NVDEV_ENGINE_GR   :
        case NVDEV_ENGINE_COPY0:
        case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+       case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+       case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+       case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
        default:
                return -EINVAL;
        }
@@ -172,14 +175,13 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
        case NVDEV_ENGINE_GR   :
        case NVDEV_ENGINE_COPY0:
        case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+       case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+       case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+       case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
        default:
                return -EINVAL;
        }
 
-       nv_wo32(base, addr + 0x00, 0x00000000);
-       nv_wo32(base, addr + 0x04, 0x00000000);
-       bar->flush(bar);
-
        nv_wr32(priv, 0x002634, chan->base.chid);
        if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
                nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -187,6 +189,9 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                        return -EBUSY;
        }
 
+       nv_wo32(base, addr + 0x00, 0x00000000);
+       nv_wo32(base, addr + 0x04, 0x00000000);
+       bar->flush(bar);
        return 0;
 }
 
index e45035e..7bbb1e1 100644 (file)
@@ -669,21 +669,27 @@ nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
                           });
 }
 
-void
+int
 nv40_grctx_init(struct nouveau_device *device, u32 *size)
 {
-       u32 ctxprog[256], i;
+       u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i;
        struct nouveau_grctx ctx = {
                .device = device,
                .mode = NOUVEAU_GRCTX_PROG,
                .data = ctxprog,
-               .ctxprog_max = ARRAY_SIZE(ctxprog)
+               .ctxprog_max = 256,
        };
 
+       if (!ctxprog)
+               return -ENOMEM;
+
        nv40_grctx_generate(&ctx);
 
        nv_wr32(device, 0x400324, 0);
        for (i = 0; i < ctx.ctxprog_len; i++)
                nv_wr32(device, 0x400328, ctxprog[i]);
        *size = ctx.ctxvals_pos * 4;
+
+       kfree(ctxprog);
+       return 0;
 }
index 6185282..e30a9c5 100644 (file)
@@ -787,168 +787,168 @@ nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv03_graph_gdi_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_patt },
-       { 0x0188, nv04_graph_mthd_bind_rop },
-       { 0x018c, nv04_graph_mthd_bind_beta1 },
-       { 0x0190, nv04_graph_mthd_bind_surf_dst },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_patt },
+       { 0x0188, 0x0188, nv04_graph_mthd_bind_rop },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_gdi_omthds[] = {
-       { 0x0188, nv04_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_beta4 },
-       { 0x0198, nv04_graph_mthd_bind_surf2d },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv01_graph_blit_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_chroma },
-       { 0x0188, nv01_graph_mthd_bind_clip },
-       { 0x018c, nv01_graph_mthd_bind_patt },
-       { 0x0190, nv04_graph_mthd_bind_rop },
-       { 0x0194, nv04_graph_mthd_bind_beta1 },
-       { 0x0198, nv04_graph_mthd_bind_surf_dst },
-       { 0x019c, nv04_graph_mthd_bind_surf_src },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+       { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+       { 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_blit_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_chroma },
-       { 0x0188, nv01_graph_mthd_bind_clip },
-       { 0x018c, nv04_graph_mthd_bind_patt },
-       { 0x0190, nv04_graph_mthd_bind_rop },
-       { 0x0194, nv04_graph_mthd_bind_beta1 },
-       { 0x0198, nv04_graph_mthd_bind_beta4 },
-       { 0x019c, nv04_graph_mthd_bind_surf2d },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+       { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_iifc_omthds[] = {
-       { 0x0188, nv01_graph_mthd_bind_chroma },
-       { 0x018c, nv01_graph_mthd_bind_clip },
-       { 0x0190, nv04_graph_mthd_bind_patt },
-       { 0x0194, nv04_graph_mthd_bind_rop },
-       { 0x0198, nv04_graph_mthd_bind_beta1 },
-       { 0x019c, nv04_graph_mthd_bind_beta4 },
-       { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
-       { 0x03e4, nv04_graph_mthd_set_operation },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_chroma },
+       { 0x018c, 0x018c, nv01_graph_mthd_bind_clip },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_patt },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_rop },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 },
+       { 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 },
+       { 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+       { 0x03e4, 0x03e4, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv01_graph_ifc_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_chroma },
-       { 0x0188, nv01_graph_mthd_bind_clip },
-       { 0x018c, nv01_graph_mthd_bind_patt },
-       { 0x0190, nv04_graph_mthd_bind_rop },
-       { 0x0194, nv04_graph_mthd_bind_beta1 },
-       { 0x0198, nv04_graph_mthd_bind_surf_dst },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+       { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_ifc_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_chroma },
-       { 0x0188, nv01_graph_mthd_bind_clip },
-       { 0x018c, nv04_graph_mthd_bind_patt },
-       { 0x0190, nv04_graph_mthd_bind_rop },
-       { 0x0194, nv04_graph_mthd_bind_beta1 },
-       { 0x0198, nv04_graph_mthd_bind_beta4 },
-       { 0x019c, nv04_graph_mthd_bind_surf2d },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+       { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv03_graph_sifc_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_chroma },
-       { 0x0188, nv01_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_surf_dst },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_sifc_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_chroma },
-       { 0x0188, nv04_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_beta4 },
-       { 0x0198, nv04_graph_mthd_bind_surf2d },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+       { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv03_graph_sifm_omthds[] = {
-       { 0x0188, nv01_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_surf_dst },
-       { 0x0304, nv04_graph_mthd_set_operation },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+       { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_sifm_omthds[] = {
-       { 0x0188, nv04_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_beta4 },
-       { 0x0198, nv04_graph_mthd_bind_surf2d },
-       { 0x0304, nv04_graph_mthd_set_operation },
+       { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+       { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_surf3d_omthds[] = {
-       { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
-       { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+       { 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v },
        {}
 };
 
 static struct nouveau_omthds
 nv03_graph_ttri_omthds[] = {
-       { 0x0188, nv01_graph_mthd_bind_clip },
-       { 0x018c, nv04_graph_mthd_bind_surf_color },
-       { 0x0190, nv04_graph_mthd_bind_surf_zeta },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta },
        {}
 };
 
 static struct nouveau_omthds
 nv01_graph_prim_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_clip },
-       { 0x0188, nv01_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_surf_dst },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_prim_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_clip },
-       { 0x0188, nv04_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_beta4 },
-       { 0x0198, nv04_graph_mthd_bind_surf2d },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+       { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
index 92521c8..5c0f843 100644 (file)
@@ -570,11 +570,11 @@ nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv17_celcius_omthds[] = {
-       { 0x1638, nv17_graph_mthd_lma_window },
-       { 0x163c, nv17_graph_mthd_lma_window },
-       { 0x1640, nv17_graph_mthd_lma_window },
-       { 0x1644, nv17_graph_mthd_lma_window },
-       { 0x1658, nv17_graph_mthd_lma_enable },
+       { 0x1638, 0x1638, nv17_graph_mthd_lma_window },
+       { 0x163c, 0x163c, nv17_graph_mthd_lma_window },
+       { 0x1640, 0x1640, nv17_graph_mthd_lma_window },
+       { 0x1644, 0x1644, nv17_graph_mthd_lma_window },
+       { 0x1658, 0x1658, nv17_graph_mthd_lma_enable },
        {}
 };
 
index 8f3f619..5b20401 100644 (file)
@@ -183,7 +183,7 @@ nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
        nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
        nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
 
-       if (nv_device(engine)->card_type == NV_20) {
+       if (nv_device(engine)->chipset != 0x34) {
                nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
                nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
                nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
@@ -224,14 +224,14 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
        nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
 
        if (show) {
-               nv_info(priv, "");
+               nv_error(priv, "");
                nouveau_bitfield_print(nv10_graph_intr_name, show);
                printk(" nsource:");
                nouveau_bitfield_print(nv04_graph_nsource, nsource);
                printk(" nstatus:");
                nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
                printk("\n");
-               nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
+               nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
                        chid, subc, class, mthd, data);
        }
 
index 8d00210..0b36dd3 100644 (file)
@@ -156,8 +156,8 @@ nv40_graph_context_ctor(struct nouveau_object *parent,
 static int
 nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
 {
-       struct nv04_graph_priv *priv = (void *)object->engine;
-       struct nv04_graph_chan *chan = (void *)object;
+       struct nv40_graph_priv *priv = (void *)object->engine;
+       struct nv40_graph_chan *chan = (void *)object;
        u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
        int ret = 0;
 
@@ -216,10 +216,10 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
 
        switch (nv_device(priv)->chipset) {
        case 0x40:
-       case 0x41: /* guess */
+       case 0x41:
        case 0x42:
        case 0x43:
-       case 0x45: /* guess */
+       case 0x45:
        case 0x4e:
                nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
                nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
@@ -227,6 +227,21 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
                nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
                nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
                nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+               switch (nv_device(priv)->chipset) {
+               case 0x40:
+               case 0x45:
+                       nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+                       nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
+                       break;
+               case 0x41:
+               case 0x42:
+               case 0x43:
+                       nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
+                       nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
+                       break;
+               default:
+                       break;
+               }
                break;
        case 0x44:
        case 0x4a:
@@ -235,18 +250,31 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
                nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
                break;
        case 0x46:
+       case 0x4c:
        case 0x47:
        case 0x49:
        case 0x4b:
-       case 0x4c:
+       case 0x63:
        case 0x67:
-       default:
+       case 0x68:
                nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
                nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
                nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
                nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
                nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
                nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+               switch (nv_device(priv)->chipset) {
+               case 0x47:
+               case 0x49:
+               case 0x4b:
+                       nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
+                       nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
                break;
        }
 
@@ -293,7 +321,7 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
        nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
 
        if (show) {
-               nv_info(priv, "");
+               nv_error(priv, "");
                nouveau_bitfield_print(nv10_graph_intr_name, show);
                printk(" nsource:");
                nouveau_bitfield_print(nv04_graph_nsource, nsource);
@@ -346,7 +374,9 @@ nv40_graph_init(struct nouveau_object *object)
                return ret;
 
        /* generate and upload context program */
-       nv40_grctx_init(nv_device(priv), &priv->size);
+       ret = nv40_grctx_init(nv_device(priv), &priv->size);
+       if (ret)
+               return ret;
 
        /* No context present currently */
        nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
index d2ac975..7da35a4 100644 (file)
@@ -15,7 +15,7 @@ nv44_graph_class(void *priv)
        return !(0x0baf & (1 << (device->chipset & 0x0f)));
 }
 
-void nv40_grctx_init(struct nouveau_device *, u32 *size);
+int  nv40_grctx_init(struct nouveau_device *, u32 *size);
 void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
 
 #endif
index ab3b9dc..b1c3d83 100644 (file)
@@ -184,6 +184,65 @@ nv50_graph_tlb_flush(struct nouveau_engine *engine)
        return 0;
 }
 
+static const struct nouveau_bitfield nv50_pgraph_status[] = {
+       { 0x00000001, "BUSY" }, /* set when any bit is set */
+       { 0x00000002, "DISPATCH" },
+       { 0x00000004, "UNK2" },
+       { 0x00000008, "UNK3" },
+       { 0x00000010, "UNK4" },
+       { 0x00000020, "UNK5" },
+       { 0x00000040, "M2MF" },
+       { 0x00000080, "UNK7" },
+       { 0x00000100, "CTXPROG" },
+       { 0x00000200, "VFETCH" },
+       { 0x00000400, "CCACHE_UNK4" },
+       { 0x00000800, "STRMOUT_GSCHED_UNK5" },
+       { 0x00001000, "UNK14XX" },
+       { 0x00002000, "UNK24XX_CSCHED" },
+       { 0x00004000, "UNK1CXX" },
+       { 0x00008000, "CLIPID" },
+       { 0x00010000, "ZCULL" },
+       { 0x00020000, "ENG2D" },
+       { 0x00040000, "UNK34XX" },
+       { 0x00080000, "TPRAST" },
+       { 0x00100000, "TPROP" },
+       { 0x00200000, "TEX" },
+       { 0x00400000, "TPVP" },
+       { 0x00800000, "MP" },
+       { 0x01000000, "ROP" },
+       {}
+};
+
+static const char *const nv50_pgraph_vstatus_0[] = {
+       "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_1[] = {
+       "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_2[] = {
+       "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX",
+       "ROP", NULL
+};
+
+static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r,
+               const char *const units[], u32 status)
+{
+       int i;
+
+       nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
+
+       for (i = 0; units[i] && status; i++) {
+               if ((status & 7) == 1)
+                       pr_cont(" %s", units[i]);
+               status >>= 3;
+       }
+       if (status)
+               pr_cont(" (invalid: 0x%x)", status);
+       pr_cont("\n");
+}
+
 static int
 nv84_graph_tlb_flush(struct nouveau_engine *engine)
 {
@@ -219,10 +278,19 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
                 !(timeout = ptimer->read(ptimer) - start > 2000000000));
 
        if (timeout) {
-               nv_error(priv, "PGRAPH TLB flush idle timeout fail: "
-                             "0x%08x 0x%08x 0x%08x 0x%08x\n",
-                        nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380),
-                        nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388));
+               nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
+
+               tmp = nv_rd32(priv, 0x400700);
+               nv_error(priv, "PGRAPH_STATUS  : 0x%08x", tmp);
+               nouveau_bitfield_print(nv50_pgraph_status, tmp);
+               pr_cont("\n");
+
+               nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0,
+                               nv_rd32(priv, 0x400380));
+               nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1,
+                               nv_rd32(priv, 0x400384));
+               nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2,
+                               nv_rd32(priv, 0x400388));
        }
 
        nv50_vm_flush_engine(&engine->base, 0x00);
@@ -453,13 +521,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
                }
                if (ustatus) {
                        if (display)
-                               nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+                               nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
                }
                nv_wr32(priv, ustatus_addr, 0xc0000000);
        }
 
        if (!tps && display)
-               nv_info(priv, "%s - No TPs claiming errors?\n", name);
+               nv_warn(priv, "%s - No TPs claiming errors?\n", name);
 }
 
 static int
@@ -718,13 +786,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
        nv_wr32(priv, 0x400500, 0x00010001);
 
        if (show) {
-               nv_info(priv, "");
+               nv_error(priv, "");
                nouveau_bitfield_print(nv50_graph_intr_name, show);
                printk("\n");
                nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
                               "mthd 0x%04x data 0x%08x\n",
                         chid, (u64)inst << 12, subc, class, mthd, data);
-               nv50_fb_trap(nouveau_fb(priv), 1);
        }
 
        if (nv_rd32(priv, 0x400824) & (1 << 31))
index c62f2d0..47a0208 100644 (file)
@@ -814,7 +814,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
                nv_wr32(priv, 0x41a100, 0x00000002);
                nv_wr32(priv, 0x409100, 0x00000002);
                if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
-                       nv_info(priv, "0x409800 wait failed\n");
+                       nv_warn(priv, "0x409800 wait failed\n");
 
                nv_wr32(priv, 0x409840, 0xffffffff);
                nv_wr32(priv, 0x409500, 0x7fffffff);
index 9c715a2..fde8e24 100644 (file)
 #define NV20_PGRAPH_TSIZE(i)                               (0x00400908 + (i*16))
 #define NV20_PGRAPH_TSTATUS(i)                             (0x0040090C + (i*16))
 #define NV20_PGRAPH_ZCOMP(i)                               (0x00400980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP0(i)                              (0x004009c0 + 4*(i))
 #define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
 #define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
 #define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
 #define NV47_PGRAPH_TSTATUS(i)                             (0x00400D0C + (i*16))
 #define NV04_PGRAPH_V_RAM                                  0x00400D40
 #define NV04_PGRAPH_W_RAM                                  0x00400D80
+#define NV47_PGRAPH_ZCOMP0(i)                              (0x00400e00 + 4*(i))
 #define NV10_PGRAPH_COMBINER0_IN_ALPHA                     0x00400E40
 #define NV10_PGRAPH_COMBINER1_IN_ALPHA                     0x00400E44
 #define NV10_PGRAPH_COMBINER0_IN_RGB                       0x00400E48
 #define NV04_PGRAPH_DMA_B_OFFSET                           0x00401098
 #define NV04_PGRAPH_DMA_B_SIZE                             0x0040109C
 #define NV04_PGRAPH_DMA_B_Y_SIZE                           0x004010A0
+#define NV47_PGRAPH_ZCOMP1(i)                              (0x004068c0 + 4*(i))
 #define NV40_PGRAPH_TILE1(i)                               (0x00406900 + (i*16))
 #define NV40_PGRAPH_TLIMIT1(i)                             (0x00406904 + (i*16))
 #define NV40_PGRAPH_TSIZE1(i)                              (0x00406908 + (i*16))
 #define NV40_PGRAPH_TSTATUS1(i)                            (0x0040690C + (i*16))
+#define NV40_PGRAPH_ZCOMP1(i)                              (0x00406980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP1(i)                              (0x004069c0 + 4*(i))
 
 #endif
index 1f394a2..9fd8637 100644 (file)
@@ -121,9 +121,9 @@ nv31_mpeg_ofuncs = {
 
 static struct nouveau_omthds
 nv31_mpeg_omthds[] = {
-       { 0x0190, nv31_mpeg_mthd_dma },
-       { 0x01a0, nv31_mpeg_mthd_dma },
-       { 0x01b0, nv31_mpeg_mthd_dma },
+       { 0x0190, 0x0190, nv31_mpeg_mthd_dma },
+       { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
+       { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
        {}
 };
 
index 1241857..f7c581a 100644 (file)
@@ -38,7 +38,7 @@ struct nv40_mpeg_priv {
 };
 
 struct nv40_mpeg_chan {
-       struct nouveau_mpeg base;
+       struct nouveau_mpeg_chan base;
 };
 
 /*******************************************************************************
index 8678a99..bc7d12b 100644 (file)
@@ -157,7 +157,6 @@ nv50_mpeg_intr(struct nouveau_subdev *subdev)
 
        nv_wr32(priv, 0x00b100, stat);
        nv_wr32(priv, 0x00b230, 0x00000001);
-       nv50_fb_trap(nouveau_fb(priv), 1);
 }
 
 static void
index 50e7e0d..5a5b2a7 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/class.h>
+#include <core/engine.h>
 #include <core/engctx.h>
+#include <core/class.h>
 
 #include <engine/ppp.h>
 
 struct nv98_ppp_priv {
-       struct nouveau_ppp base;
+       struct nouveau_engine base;
 };
 
 struct nv98_ppp_chan {
-       struct nouveau_ppp_chan base;
+       struct nouveau_engctx base;
 };
 
 /*******************************************************************************
@@ -49,61 +49,16 @@ nv98_ppp_sclass[] = {
  * PPPP context
  ******************************************************************************/
 
-static int
-nv98_ppp_context_ctor(struct nouveau_object *parent,
-                     struct nouveau_object *engine,
-                     struct nouveau_oclass *oclass, void *data, u32 size,
-                     struct nouveau_object **pobject)
-{
-       struct nv98_ppp_chan *priv;
-       int ret;
-
-       ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
-                                        0, 0, 0, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static void
-nv98_ppp_context_dtor(struct nouveau_object *object)
-{
-       struct nv98_ppp_chan *priv = (void *)object;
-       nouveau_ppp_context_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_context_init(struct nouveau_object *object)
-{
-       struct nv98_ppp_chan *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_ppp_context_init(&priv->base);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv98_ppp_chan *priv = (void *)object;
-       return nouveau_ppp_context_fini(&priv->base, suspend);
-}
-
 static struct nouveau_oclass
 nv98_ppp_cclass = {
        .handle = NV_ENGCTX(PPP, 0x98),
        .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv98_ppp_context_ctor,
-               .dtor = nv98_ppp_context_dtor,
-               .init = nv98_ppp_context_init,
-               .fini = nv98_ppp_context_fini,
-               .rd32 = _nouveau_ppp_context_rd32,
-               .wr32 = _nouveau_ppp_context_wr32,
+               .ctor = _nouveau_engctx_ctor,
+               .dtor = _nouveau_engctx_dtor,
+               .init = _nouveau_engctx_init,
+               .fini = _nouveau_engctx_fini,
+               .rd32 = _nouveau_engctx_rd32,
+               .wr32 = _nouveau_engctx_wr32,
        },
 };
 
@@ -111,11 +66,6 @@ nv98_ppp_cclass = {
  * PPPP engine/subdev functions
  ******************************************************************************/
 
-static void
-nv98_ppp_intr(struct nouveau_subdev *subdev)
-{
-}
-
 static int
 nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
              struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +74,25 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv98_ppp_priv *priv;
        int ret;
 
-       ret = nouveau_ppp_create(parent, engine, oclass, &priv);
+       ret = nouveau_engine_create(parent, engine, oclass, true,
+                                   "PPPP", "ppp", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
        nv_subdev(priv)->unit = 0x00400002;
-       nv_subdev(priv)->intr = nv98_ppp_intr;
        nv_engine(priv)->cclass = &nv98_ppp_cclass;
        nv_engine(priv)->sclass = nv98_ppp_sclass;
        return 0;
 }
 
-static void
-nv98_ppp_dtor(struct nouveau_object *object)
-{
-       struct nv98_ppp_priv *priv = (void *)object;
-       nouveau_ppp_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_init(struct nouveau_object *object)
-{
-       struct nv98_ppp_priv *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_ppp_init(&priv->base);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-nv98_ppp_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv98_ppp_priv *priv = (void *)object;
-       return nouveau_ppp_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nv98_ppp_oclass = {
        .handle = NV_ENGINE(PPP, 0x98),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv98_ppp_ctor,
-               .dtor = nv98_ppp_dtor,
-               .init = nv98_ppp_init,
-               .fini = nv98_ppp_fini,
+               .dtor = _nouveau_engine_dtor,
+               .init = _nouveau_engine_init,
+               .fini = _nouveau_engine_fini,
        },
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
new file mode 100644 (file)
index 0000000..ebf0d86
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/ppp.h>
+
+struct nvc0_ppp_priv {
+       struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * PPP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_sclass[] = {
+       { 0x90b3, &nouveau_object_ofuncs },
+       {},
+};
+
+/*******************************************************************************
+ * PPPP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_cclass = {
+       .handle = NV_ENGCTX(PPP, 0xc0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
+       },
+};
+
+/*******************************************************************************
+ * PPPP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_ppp_init(struct nouveau_object *object)
+{
+       struct nvc0_ppp_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_falcon_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x086010, 0x0000fff2);
+       nv_wr32(priv, 0x08601c, 0x0000fff2);
+       return 0;
+}
+
+static int
+nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+             struct nouveau_oclass *oclass, void *data, u32 size,
+             struct nouveau_object **pobject)
+{
+       struct nvc0_ppp_priv *priv;
+       int ret;
+
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
+                                   "PPPP", "ppp", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_subdev(priv)->unit = 0x00000002;
+       nv_engine(priv)->cclass = &nvc0_ppp_cclass;
+       nv_engine(priv)->sclass = nvc0_ppp_sclass;
+       return 0;
+}
+
+struct nouveau_oclass
+nvc0_ppp_oclass = {
+       .handle = NV_ENGINE(PPP, 0xc0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nvc0_ppp_ctor,
+               .dtor = _nouveau_falcon_dtor,
+               .init = nvc0_ppp_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
+       },
+};
index 3ca4c3a..2a859a3 100644 (file)
@@ -63,8 +63,8 @@ nv04_software_flip(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv04_software_omthds[] = {
-       { 0x0150, nv04_software_set_ref },
-       { 0x0500, nv04_software_flip },
+       { 0x0150, 0x0150, nv04_software_set_ref },
+       { 0x0500, 0x0500, nv04_software_flip },
        {}
 };
 
index 6e699af..a019364 100644 (file)
@@ -52,7 +52,7 @@ nv10_software_flip(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv10_software_omthds[] = {
-       { 0x0500, nv10_software_flip },
+       { 0x0500, 0x0500, nv10_software_flip },
        {}
 };
 
index a2edcd3..b0e7e1c 100644 (file)
@@ -117,11 +117,11 @@ nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv50_software_omthds[] = {
-       { 0x018c, nv50_software_mthd_dma_vblsem },
-       { 0x0400, nv50_software_mthd_vblsem_offset },
-       { 0x0404, nv50_software_mthd_vblsem_value },
-       { 0x0408, nv50_software_mthd_vblsem_release },
-       { 0x0500, nv50_software_mthd_flip },
+       { 0x018c, 0x018c, nv50_software_mthd_dma_vblsem },
+       { 0x0400, 0x0400, nv50_software_mthd_vblsem_offset },
+       { 0x0404, 0x0404, nv50_software_mthd_vblsem_value },
+       { 0x0408, 0x0408, nv50_software_mthd_vblsem_release },
+       { 0x0500, 0x0500, nv50_software_mthd_flip },
        {}
 };
 
index b7b0d7e..282a1cd 100644 (file)
@@ -99,11 +99,11 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nvc0_software_omthds[] = {
-       { 0x0400, nvc0_software_mthd_vblsem_offset },
-       { 0x0404, nvc0_software_mthd_vblsem_offset },
-       { 0x0408, nvc0_software_mthd_vblsem_value },
-       { 0x040c, nvc0_software_mthd_vblsem_release },
-       { 0x0500, nvc0_software_mthd_flip },
+       { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
+       { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
+       { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
+       { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
+       { 0x0500, 0x0500, nvc0_software_mthd_flip },
        {}
 };
 
index dd23c80..261cd96 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/class.h>
 #include <core/engctx.h>
+#include <core/class.h>
 
 #include <engine/vp.h>
 
 struct nv84_vp_priv {
-       struct nouveau_vp base;
-};
-
-struct nv84_vp_chan {
-       struct nouveau_vp_chan base;
+       struct nouveau_engine base;
 };
 
 /*******************************************************************************
@@ -49,61 +44,16 @@ nv84_vp_sclass[] = {
  * PVP context
  ******************************************************************************/
 
-static int
-nv84_vp_context_ctor(struct nouveau_object *parent,
-                    struct nouveau_object *engine,
-                    struct nouveau_oclass *oclass, void *data, u32 size,
-                    struct nouveau_object **pobject)
-{
-       struct nv84_vp_chan *priv;
-       int ret;
-
-       ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
-                                       0, 0, 0, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static void
-nv84_vp_context_dtor(struct nouveau_object *object)
-{
-       struct nv84_vp_chan *priv = (void *)object;
-       nouveau_vp_context_destroy(&priv->base);
-}
-
-static int
-nv84_vp_context_init(struct nouveau_object *object)
-{
-       struct nv84_vp_chan *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_vp_context_init(&priv->base);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv84_vp_chan *priv = (void *)object;
-       return nouveau_vp_context_fini(&priv->base, suspend);
-}
-
 static struct nouveau_oclass
 nv84_vp_cclass = {
        .handle = NV_ENGCTX(VP, 0x84),
        .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv84_vp_context_ctor,
-               .dtor = nv84_vp_context_dtor,
-               .init = nv84_vp_context_init,
-               .fini = nv84_vp_context_fini,
-               .rd32 = _nouveau_vp_context_rd32,
-               .wr32 = _nouveau_vp_context_wr32,
+               .ctor = _nouveau_engctx_ctor,
+               .dtor = _nouveau_engctx_dtor,
+               .init = _nouveau_engctx_init,
+               .fini = _nouveau_engctx_fini,
+               .rd32 = _nouveau_engctx_rd32,
+               .wr32 = _nouveau_engctx_wr32,
        },
 };
 
@@ -111,11 +61,6 @@ nv84_vp_cclass = {
  * PVP engine/subdev functions
  ******************************************************************************/
 
-static void
-nv84_vp_intr(struct nouveau_subdev *subdev)
-{
-}
-
 static int
 nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv84_vp_priv *priv;
        int ret;
 
-       ret = nouveau_vp_create(parent, engine, oclass, &priv);
+       ret = nouveau_engine_create(parent, engine, oclass, true,
+                                   "PVP", "vp", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
        nv_subdev(priv)->unit = 0x01020000;
-       nv_subdev(priv)->intr = nv84_vp_intr;
        nv_engine(priv)->cclass = &nv84_vp_cclass;
        nv_engine(priv)->sclass = nv84_vp_sclass;
        return 0;
 }
 
-static void
-nv84_vp_dtor(struct nouveau_object *object)
-{
-       struct nv84_vp_priv *priv = (void *)object;
-       nouveau_vp_destroy(&priv->base);
-}
-
-static int
-nv84_vp_init(struct nouveau_object *object)
-{
-       struct nv84_vp_priv *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_vp_init(&priv->base);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-nv84_vp_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv84_vp_priv *priv = (void *)object;
-       return nouveau_vp_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nv84_vp_oclass = {
        .handle = NV_ENGINE(VP, 0x84),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv84_vp_ctor,
-               .dtor = nv84_vp_dtor,
-               .init = nv84_vp_init,
-               .fini = nv84_vp_fini,
+               .dtor = _nouveau_engine_dtor,
+               .init = _nouveau_engine_init,
+               .fini = _nouveau_engine_fini,
        },
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
new file mode 100644 (file)
index 0000000..f761949
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nvc0_vp_priv {
+       struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_sclass[] = {
+       { 0x90b2, &nouveau_object_ofuncs },
+       {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_cclass = {
+       .handle = NV_ENGCTX(VP, 0xc0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
+       },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_vp_init(struct nouveau_object *object)
+{
+       struct nvc0_vp_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_falcon_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x085010, 0x0000fff2);
+       nv_wr32(priv, 0x08501c, 0x0000fff2);
+       return 0;
+}
+
+static int
+nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nvc0_vp_priv *priv;
+       int ret;
+
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+                                   "PVP", "vp", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_subdev(priv)->unit = 0x00020000;
+       nv_engine(priv)->cclass = &nvc0_vp_cclass;
+       nv_engine(priv)->sclass = nvc0_vp_sclass;
+       return 0;
+}
+
+struct nouveau_oclass
+nvc0_vp_oclass = {
+       .handle = NV_ENGINE(VP, 0xc0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nvc0_vp_ctor,
+               .dtor = _nouveau_falcon_dtor,
+               .init = nvc0_vp_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
new file mode 100644 (file)
index 0000000..2384ce5
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nve0_vp_priv {
+       struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_sclass[] = {
+       { 0x95b2, &nouveau_object_ofuncs },
+       {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_cclass = {
+       .handle = NV_ENGCTX(VP, 0xe0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
+       },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_vp_init(struct nouveau_object *object)
+{
+       struct nve0_vp_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_falcon_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x085010, 0x0000fff2);
+       nv_wr32(priv, 0x08501c, 0x0000fff2);
+       return 0;
+}
+
+static int
+nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nve0_vp_priv *priv;
+       int ret;
+
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+                                   "PVP", "vp", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_subdev(priv)->unit = 0x00020000;
+       nv_engine(priv)->cclass = &nve0_vp_cclass;
+       nv_engine(priv)->sclass = nve0_vp_sclass;
+       return 0;
+}
+
+struct nouveau_oclass
+nve0_vp_oclass = {
+       .handle = NV_ENGINE(VP, 0xe0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nve0_vp_ctor,
+               .dtor = _nouveau_falcon_dtor,
+               .init = nve0_vp_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
+       },
+};
index 6180ae9..47c4b3a 100644 (file)
@@ -23,6 +23,7 @@
 #define NV_DEVICE_DISABLE_COPY0                           0x0000008000000000ULL
 #define NV_DEVICE_DISABLE_COPY1                           0x0000010000000000ULL
 #define NV_DEVICE_DISABLE_UNK1C1                          0x0000020000000000ULL
+#define NV_DEVICE_DISABLE_VENC                            0x0000040000000000ULL
 
 struct nv_device_class {
        u64 device;     /* device identifier, ~0 for client default */
@@ -52,11 +53,49 @@ struct nv_device_class {
 #define NV_DMA_ACCESS_WR                                             0x00000200
 #define NV_DMA_ACCESS_RDWR                                           0x00000300
 
+/* NV50:NVC0 */
+#define NV50_DMA_CONF0_ENABLE                                        0x80000000
+#define NV50_DMA_CONF0_PRIV                                          0x00300000
+#define NV50_DMA_CONF0_PRIV_VM                                       0x00000000
+#define NV50_DMA_CONF0_PRIV_US                                       0x00100000
+#define NV50_DMA_CONF0_PRIV__S                                       0x00200000
+#define NV50_DMA_CONF0_PART                                          0x00030000
+#define NV50_DMA_CONF0_PART_VM                                       0x00000000
+#define NV50_DMA_CONF0_PART_256                                      0x00010000
+#define NV50_DMA_CONF0_PART_1KB                                      0x00020000
+#define NV50_DMA_CONF0_COMP                                          0x00000180
+#define NV50_DMA_CONF0_COMP_NONE                                     0x00000000
+#define NV50_DMA_CONF0_COMP_VM                                       0x00000180
+#define NV50_DMA_CONF0_TYPE                                          0x0000007f
+#define NV50_DMA_CONF0_TYPE_LINEAR                                   0x00000000
+#define NV50_DMA_CONF0_TYPE_VM                                       0x0000007f
+
+/* NVC0:NVD9 */
+#define NVC0_DMA_CONF0_ENABLE                                        0x80000000
+#define NVC0_DMA_CONF0_PRIV                                          0x00300000
+#define NVC0_DMA_CONF0_PRIV_VM                                       0x00000000
+#define NVC0_DMA_CONF0_PRIV_US                                       0x00100000
+#define NVC0_DMA_CONF0_PRIV__S                                       0x00200000
+#define NVC0_DMA_CONF0_UNKN /* PART? */                              0x00030000
+#define NVC0_DMA_CONF0_TYPE                                          0x000000ff
+#define NVC0_DMA_CONF0_TYPE_LINEAR                                   0x00000000
+#define NVC0_DMA_CONF0_TYPE_VM                                       0x000000ff
+
+/* NVD9- */
+#define NVD0_DMA_CONF0_ENABLE                                        0x80000000
+#define NVD0_DMA_CONF0_PAGE                                          0x00000400
+#define NVD0_DMA_CONF0_PAGE_LP                                       0x00000000
+#define NVD0_DMA_CONF0_PAGE_SP                                       0x00000400
+#define NVD0_DMA_CONF0_TYPE                                          0x000000ff
+#define NVD0_DMA_CONF0_TYPE_LINEAR                                   0x00000000
+#define NVD0_DMA_CONF0_TYPE_VM                                       0x000000ff
+
 struct nv_dma_class {
        u32 flags;
        u32 pad0;
        u64 start;
        u64 limit;
+       u32 conf0;
 };
 
 /* DMA FIFO channel classes
@@ -115,4 +154,190 @@ struct nve0_channel_ind_class {
        u32 engine;
 };
 
+/* 5070: NV50_DISP
+ * 8270: NV84_DISP
+ * 8370: NVA0_DISP
+ * 8870: NV94_DISP
+ * 8570: NVA3_DISP
+ * 9070: NVD0_DISP
+ * 9170: NVE0_DISP
+ */
+
+#define NV50_DISP_CLASS                                              0x00005070
+#define NV84_DISP_CLASS                                              0x00008270
+#define NVA0_DISP_CLASS                                              0x00008370
+#define NV94_DISP_CLASS                                              0x00008870
+#define NVA3_DISP_CLASS                                              0x00008570
+#define NVD0_DISP_CLASS                                              0x00009070
+#define NVE0_DISP_CLASS                                              0x00009170
+
+#define NV50_DISP_SOR_MTHD                                           0x00010000
+#define NV50_DISP_SOR_MTHD_TYPE                                      0x0000f000
+#define NV50_DISP_SOR_MTHD_HEAD                                      0x00000018
+#define NV50_DISP_SOR_MTHD_LINK                                      0x00000004
+#define NV50_DISP_SOR_MTHD_OR                                        0x00000003
+
+#define NV50_DISP_SOR_PWR                                            0x00010000
+#define NV50_DISP_SOR_PWR_STATE                                      0x00000001
+#define NV50_DISP_SOR_PWR_STATE_ON                                   0x00000001
+#define NV50_DISP_SOR_PWR_STATE_OFF                                  0x00000000
+#define NVA3_DISP_SOR_HDA_ELD                                        0x00010100
+#define NV84_DISP_SOR_HDMI_PWR                                       0x00012000
+#define NV84_DISP_SOR_HDMI_PWR_STATE                                 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF                             0x00000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_ON                              0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET                         0x001f0000
+#define NV84_DISP_SOR_HDMI_PWR_REKEY                                 0x0000007f
+#define NV50_DISP_SOR_LVDS_SCRIPT                                    0x00013000
+#define NV50_DISP_SOR_LVDS_SCRIPT_ID                                 0x0000ffff
+#define NV94_DISP_SOR_DP_TRAIN                                       0x00016000
+#define NV94_DISP_SOR_DP_TRAIN_OP                                    0xf0000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN                            0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_INIT                               0x10000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_FINI                               0x20000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD                           0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF                       0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON                        0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN                               0x00000003
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED                      0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL                                      0x00016040
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME                                0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD                            0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH                            0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_WIDTH                                0x00001f00
+#define NV94_DISP_SOR_DP_LNKCTL_COUNT                                0x00000007
+#define NV94_DISP_SOR_DP_DRVCTL(l)                     ((l) * 0x40 + 0x00016100)
+#define NV94_DISP_SOR_DP_DRVCTL_VS                                   0x00000300
+#define NV94_DISP_SOR_DP_DRVCTL_PE                                   0x00000003
+
+#define NV50_DISP_DAC_MTHD                                           0x00020000
+#define NV50_DISP_DAC_MTHD_TYPE                                      0x0000f000
+#define NV50_DISP_DAC_MTHD_OR                                        0x00000003
+
+#define NV50_DISP_DAC_PWR                                            0x00020000
+#define NV50_DISP_DAC_PWR_HSYNC                                      0x00000001
+#define NV50_DISP_DAC_PWR_HSYNC_ON                                   0x00000000
+#define NV50_DISP_DAC_PWR_HSYNC_LO                                   0x00000001
+#define NV50_DISP_DAC_PWR_VSYNC                                      0x00000004
+#define NV50_DISP_DAC_PWR_VSYNC_ON                                   0x00000000
+#define NV50_DISP_DAC_PWR_VSYNC_LO                                   0x00000004
+#define NV50_DISP_DAC_PWR_DATA                                       0x00000010
+#define NV50_DISP_DAC_PWR_DATA_ON                                    0x00000000
+#define NV50_DISP_DAC_PWR_DATA_LO                                    0x00000010
+#define NV50_DISP_DAC_PWR_STATE                                      0x00000040
+#define NV50_DISP_DAC_PWR_STATE_ON                                   0x00000000
+#define NV50_DISP_DAC_PWR_STATE_OFF                                  0x00000040
+#define NV50_DISP_DAC_LOAD                                           0x0002000c
+#define NV50_DISP_DAC_LOAD_VALUE                                     0x00000007
+
+struct nv50_display_class {
+};
+
+/* 507a: NV50_DISP_CURS
+ * 827a: NV84_DISP_CURS
+ * 837a: NVA0_DISP_CURS
+ * 887a: NV94_DISP_CURS
+ * 857a: NVA3_DISP_CURS
+ * 907a: NVD0_DISP_CURS
+ * 917a: NVE0_DISP_CURS
+ */
+
+#define NV50_DISP_CURS_CLASS                                         0x0000507a
+#define NV84_DISP_CURS_CLASS                                         0x0000827a
+#define NVA0_DISP_CURS_CLASS                                         0x0000837a
+#define NV94_DISP_CURS_CLASS                                         0x0000887a
+#define NVA3_DISP_CURS_CLASS                                         0x0000857a
+#define NVD0_DISP_CURS_CLASS                                         0x0000907a
+#define NVE0_DISP_CURS_CLASS                                         0x0000917a
+
+struct nv50_display_curs_class {
+       u32 head;
+};
+
+/* 507b: NV50_DISP_OIMM
+ * 827b: NV84_DISP_OIMM
+ * 837b: NVA0_DISP_OIMM
+ * 887b: NV94_DISP_OIMM
+ * 857b: NVA3_DISP_OIMM
+ * 907b: NVD0_DISP_OIMM
+ * 917b: NVE0_DISP_OIMM
+ */
+
+#define NV50_DISP_OIMM_CLASS                                         0x0000507b
+#define NV84_DISP_OIMM_CLASS                                         0x0000827b
+#define NVA0_DISP_OIMM_CLASS                                         0x0000837b
+#define NV94_DISP_OIMM_CLASS                                         0x0000887b
+#define NVA3_DISP_OIMM_CLASS                                         0x0000857b
+#define NVD0_DISP_OIMM_CLASS                                         0x0000907b
+#define NVE0_DISP_OIMM_CLASS                                         0x0000917b
+
+struct nv50_display_oimm_class {
+       u32 head;
+};
+
+/* 507c: NV50_DISP_SYNC
+ * 827c: NV84_DISP_SYNC
+ * 837c: NVA0_DISP_SYNC
+ * 887c: NV94_DISP_SYNC
+ * 857c: NVA3_DISP_SYNC
+ * 907c: NVD0_DISP_SYNC
+ * 917c: NVE0_DISP_SYNC
+ */
+
+#define NV50_DISP_SYNC_CLASS                                         0x0000507c
+#define NV84_DISP_SYNC_CLASS                                         0x0000827c
+#define NVA0_DISP_SYNC_CLASS                                         0x0000837c
+#define NV94_DISP_SYNC_CLASS                                         0x0000887c
+#define NVA3_DISP_SYNC_CLASS                                         0x0000857c
+#define NVD0_DISP_SYNC_CLASS                                         0x0000907c
+#define NVE0_DISP_SYNC_CLASS                                         0x0000917c
+
+struct nv50_display_sync_class {
+       u32 pushbuf;
+       u32 head;
+};
+
+/* 507d: NV50_DISP_MAST
+ * 827d: NV84_DISP_MAST
+ * 837d: NVA0_DISP_MAST
+ * 887d: NV94_DISP_MAST
+ * 857d: NVA3_DISP_MAST
+ * 907d: NVD0_DISP_MAST
+ * 917d: NVE0_DISP_MAST
+ */
+
+#define NV50_DISP_MAST_CLASS                                         0x0000507d
+#define NV84_DISP_MAST_CLASS                                         0x0000827d
+#define NVA0_DISP_MAST_CLASS                                         0x0000837d
+#define NV94_DISP_MAST_CLASS                                         0x0000887d
+#define NVA3_DISP_MAST_CLASS                                         0x0000857d
+#define NVD0_DISP_MAST_CLASS                                         0x0000907d
+#define NVE0_DISP_MAST_CLASS                                         0x0000917d
+
+struct nv50_display_mast_class {
+       u32 pushbuf;
+};
+
+/* 507e: NV50_DISP_OVLY
+ * 827e: NV84_DISP_OVLY
+ * 837e: NVA0_DISP_OVLY
+ * 887e: NV94_DISP_OVLY
+ * 857e: NVA3_DISP_OVLY
+ * 907e: NVD0_DISP_OVLY
+ * 917e: NVE0_DISP_OVLY
+ */
+
+#define NV50_DISP_OVLY_CLASS                                         0x0000507e
+#define NV84_DISP_OVLY_CLASS                                         0x0000827e
+#define NVA0_DISP_OVLY_CLASS                                         0x0000837e
+#define NV94_DISP_OVLY_CLASS                                         0x0000887e
+#define NVA3_DISP_OVLY_CLASS                                         0x0000857e
+#define NVD0_DISP_OVLY_CLASS                                         0x0000907e
+#define NVE0_DISP_OVLY_CLASS                                         0x0000917e
+
+struct nv50_display_ovly_class {
+       u32 pushbuf;
+       u32 head;
+};
+
 #endif
index 8a947b6..2fd48b5 100644 (file)
@@ -39,6 +39,9 @@ void nouveau_engctx_destroy(struct nouveau_engctx *);
 int  nouveau_engctx_init(struct nouveau_engctx *);
 int  nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
 
+int  _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *,
+                         struct nouveau_oclass *, void *, u32,
+                         struct nouveau_object **);
 void _nouveau_engctx_dtor(struct nouveau_object *);
 int  _nouveau_engctx_init(struct nouveau_object *);
 int  _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
new file mode 100644 (file)
index 0000000..1edec38
--- /dev/null
@@ -0,0 +1,81 @@
+#ifndef __NOUVEAU_FALCON_H__
+#define __NOUVEAU_FALCON_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+struct nouveau_falcon_chan {
+       struct nouveau_engctx base;
+};
+
+#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d)                         \
+       nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_falcon_context_destroy(d)                                      \
+       nouveau_engctx_destroy(&(d)->base)
+#define nouveau_falcon_context_init(d)                                         \
+       nouveau_engctx_init(&(d)->base)
+#define nouveau_falcon_context_fini(d,s)                                       \
+       nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor
+#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor
+#define _nouveau_falcon_context_init _nouveau_engctx_init
+#define _nouveau_falcon_context_fini _nouveau_engctx_fini
+#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_falcon_data {
+       bool external;
+};
+
+struct nouveau_falcon {
+       struct nouveau_engine base;
+
+       u32 addr;
+       u8  version;
+       u8  secret;
+
+       struct nouveau_gpuobj *core;
+       bool external;
+
+       struct {
+               u32 limit;
+               u32 *data;
+               u32  size;
+       } code;
+
+       struct {
+               u32 limit;
+               u32 *data;
+               u32  size;
+       } data;
+};
+
+#define nv_falcon(priv) (&(priv)->base)
+
+#define nouveau_falcon_create(p,e,c,b,d,i,f,r)                                 \
+       nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f),              \
+                              sizeof(**r),(void **)r)
+#define nouveau_falcon_destroy(p)                                              \
+       nouveau_engine_destroy(&(p)->base)
+#define nouveau_falcon_init(p) ({                                              \
+       struct nouveau_falcon *falcon = (p);                                   \
+       _nouveau_falcon_init(nv_object(falcon));                               \
+})
+#define nouveau_falcon_fini(p,s) ({                                            \
+       struct nouveau_falcon *falcon = (p);                                   \
+       _nouveau_falcon_fini(nv_object(falcon), (s));                          \
+})
+
+int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
+                          struct nouveau_oclass *, u32, bool, const char *,
+                          const char *, int, void **);
+
+#define _nouveau_falcon_dtor _nouveau_engine_dtor
+int  _nouveau_falcon_init(struct nouveau_object *);
+int  _nouveau_falcon_fini(struct nouveau_object *, bool);
+u32  _nouveau_falcon_rd32(struct nouveau_object *, u64);
+void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32);
+
+#endif
index 6eaff79..b3b9ce4 100644 (file)
@@ -65,7 +65,7 @@ nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
 void _nouveau_gpuobj_dtor(struct nouveau_object *);
 int  _nouveau_gpuobj_init(struct nouveau_object *);
 int  _nouveau_gpuobj_fini(struct nouveau_object *, bool);
-u32  _nouveau_gpuobj_rd32(struct nouveau_object *, u32);
-void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32);
+u32  _nouveau_gpuobj_rd32(struct nouveau_object *, u64);
+void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32);
 
 #endif
index 975137b..2514e81 100644 (file)
@@ -21,6 +21,12 @@ struct nouveau_mm {
        int heap_nodes;
 };
 
+static inline bool
+nouveau_mm_initialised(struct nouveau_mm *mm)
+{
+       return mm->block_size != 0;
+}
+
 int  nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
 int  nouveau_mm_fini(struct nouveau_mm *);
 int  nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
index 818feab..5982935 100644 (file)
@@ -70,7 +70,8 @@ nv_pclass(struct nouveau_object *parent, u32 oclass)
 }
 
 struct nouveau_omthds {
-       u32 method;
+       u32 start;
+       u32 limit;
        int (*call)(struct nouveau_object *, u32, void *, u32);
 };
 
@@ -81,12 +82,12 @@ struct nouveau_ofuncs {
        void (*dtor)(struct nouveau_object *);
        int  (*init)(struct nouveau_object *);
        int  (*fini)(struct nouveau_object *, bool suspend);
-       u8   (*rd08)(struct nouveau_object *, u32 offset);
-       u16  (*rd16)(struct nouveau_object *, u32 offset);
-       u32  (*rd32)(struct nouveau_object *, u32 offset);
-       void (*wr08)(struct nouveau_object *, u32 offset, u8 data);
-       void (*wr16)(struct nouveau_object *, u32 offset, u16 data);
-       void (*wr32)(struct nouveau_object *, u32 offset, u32 data);
+       u8   (*rd08)(struct nouveau_object *, u64 offset);
+       u16  (*rd16)(struct nouveau_object *, u64 offset);
+       u32  (*rd32)(struct nouveau_object *, u64 offset);
+       void (*wr08)(struct nouveau_object *, u64 offset, u8 data);
+       void (*wr16)(struct nouveau_object *, u64 offset, u16 data);
+       void (*wr32)(struct nouveau_object *, u64 offset, u32 data);
 };
 
 static inline struct nouveau_ofuncs *
@@ -109,21 +110,27 @@ int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
 void nouveau_object_debug(void);
 
 static inline int
-nv_call(void *obj, u32 mthd, u32 data)
+nv_exec(void *obj, u32 mthd, void *data, u32 size)
 {
        struct nouveau_omthds *method = nv_oclass(obj)->omthds;
 
        while (method && method->call) {
-               if (method->method == mthd)
-                       return method->call(obj, mthd, &data, sizeof(data));
+               if (mthd >= method->start && mthd <= method->limit)
+                       return method->call(obj, mthd, data, size);
                method++;
        }
 
        return -EINVAL;
 }
 
+static inline int
+nv_call(void *obj, u32 mthd, u32 data)
+{
+       return nv_exec(obj, mthd, &data, sizeof(data));
+}
+
 static inline u8
-nv_ro08(void *obj, u32 addr)
+nv_ro08(void *obj, u64 addr)
 {
        u8 data = nv_ofuncs(obj)->rd08(obj, addr);
        nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
@@ -131,7 +138,7 @@ nv_ro08(void *obj, u32 addr)
 }
 
 static inline u16
-nv_ro16(void *obj, u32 addr)
+nv_ro16(void *obj, u64 addr)
 {
        u16 data = nv_ofuncs(obj)->rd16(obj, addr);
        nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
@@ -139,7 +146,7 @@ nv_ro16(void *obj, u32 addr)
 }
 
 static inline u32
-nv_ro32(void *obj, u32 addr)
+nv_ro32(void *obj, u64 addr)
 {
        u32 data = nv_ofuncs(obj)->rd32(obj, addr);
        nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
@@ -147,42 +154,46 @@ nv_ro32(void *obj, u32 addr)
 }
 
 static inline void
-nv_wo08(void *obj, u32 addr, u8 data)
+nv_wo08(void *obj, u64 addr, u8 data)
 {
        nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
        nv_ofuncs(obj)->wr08(obj, addr, data);
 }
 
 static inline void
-nv_wo16(void *obj, u32 addr, u16 data)
+nv_wo16(void *obj, u64 addr, u16 data)
 {
        nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
        nv_ofuncs(obj)->wr16(obj, addr, data);
 }
 
 static inline void
-nv_wo32(void *obj, u32 addr, u32 data)
+nv_wo32(void *obj, u64 addr, u32 data)
 {
        nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
        nv_ofuncs(obj)->wr32(obj, addr, data);
 }
 
 static inline u32
-nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
+nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
 {
        u32 temp = nv_ro32(obj, addr);
        nv_wo32(obj, addr, (temp & ~mask) | data);
        return temp;
 }
 
-static inline bool
-nv_strncmp(void *obj, u32 addr, u32 len, const char *str)
+static inline int
+nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
 {
+       unsigned char c1, c2;
+
        while (len--) {
-               if (nv_ro08(obj, addr++) != *(str++))
-                       return false;
+               c1 = nv_ro08(obj, addr++);
+               c2 = *(str++);
+               if (c1 != c2)
+                       return c1 - c2;
        }
-       return true;
+       return 0;
 }
 
 #endif
index 3c2e940..31cd852 100644 (file)
@@ -14,7 +14,7 @@ struct nouveau_parent {
        struct nouveau_object base;
 
        struct nouveau_sclass *sclass;
-       u32 engine;
+       u64 engine;
 
        int  (*context_attach)(struct nouveau_object *,
                               struct nouveau_object *);
index 75d1ed5..13ccdf5 100644 (file)
@@ -1,45 +1,8 @@
 #ifndef __NOUVEAU_BSP_H__
 #define __NOUVEAU_BSP_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_bsp_chan {
-       struct nouveau_engctx base;
-};
-
-#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d)                            \
-       nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_bsp_context_destroy(d)                                         \
-       nouveau_engctx_destroy(&(d)->base)
-#define nouveau_bsp_context_init(d)                                            \
-       nouveau_engctx_init(&(d)->base)
-#define nouveau_bsp_context_fini(d,s)                                          \
-       nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_bsp_context_init _nouveau_engctx_init
-#define _nouveau_bsp_context_fini _nouveau_engctx_fini
-#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_bsp {
-       struct nouveau_engine base;
-};
-
-#define nouveau_bsp_create(p,e,c,d)                                            \
-       nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
-#define nouveau_bsp_destroy(d)                                                 \
-       nouveau_engine_destroy(&(d)->base)
-#define nouveau_bsp_init(d)                                                    \
-       nouveau_engine_init(&(d)->base)
-#define nouveau_bsp_fini(d,s)                                                  \
-       nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_dtor _nouveau_engine_dtor
-#define _nouveau_bsp_init _nouveau_engine_init
-#define _nouveau_bsp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv84_bsp_oclass;
+extern struct nouveau_oclass nvc0_bsp_oclass;
+extern struct nouveau_oclass nve0_bsp_oclass;
 
 #endif
index 70b9d8c..8cad2cf 100644 (file)
@@ -1,44 +1,7 @@
 #ifndef __NOUVEAU_COPY_H__
 #define __NOUVEAU_COPY_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_copy_chan {
-       struct nouveau_engctx base;
-};
-
-#define nouveau_copy_context_create(p,e,c,g,s,a,f,d)                           \
-       nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_copy_context_destroy(d)                                        \
-       nouveau_engctx_destroy(&(d)->base)
-#define nouveau_copy_context_init(d)                                           \
-       nouveau_engctx_init(&(d)->base)
-#define nouveau_copy_context_fini(d,s)                                         \
-       nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
-#define _nouveau_copy_context_init _nouveau_engctx_init
-#define _nouveau_copy_context_fini _nouveau_engctx_fini
-#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_copy {
-       struct nouveau_engine base;
-};
-
-#define nouveau_copy_create(p,e,c,y,i,d)                                       \
-       nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
-#define nouveau_copy_destroy(d)                                                \
-       nouveau_engine_destroy(&(d)->base)
-#define nouveau_copy_init(d)                                                   \
-       nouveau_engine_init(&(d)->base)
-#define nouveau_copy_fini(d,s)                                                 \
-       nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_copy_dtor _nouveau_engine_dtor
-#define _nouveau_copy_init _nouveau_engine_init
-#define _nouveau_copy_fini _nouveau_engine_fini
+void nva3_copy_intr(struct nouveau_subdev *);
 
 extern struct nouveau_oclass nva3_copy_oclass;
 extern struct nouveau_oclass nvc0_copy0_oclass;
index e367474..db97561 100644 (file)
@@ -1,45 +1,6 @@
 #ifndef __NOUVEAU_CRYPT_H__
 #define __NOUVEAU_CRYPT_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_crypt_chan {
-       struct nouveau_engctx base;
-};
-
-#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d)                          \
-       nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_crypt_context_destroy(d)                                       \
-       nouveau_engctx_destroy(&(d)->base)
-#define nouveau_crypt_context_init(d)                                          \
-       nouveau_engctx_init(&(d)->base)
-#define nouveau_crypt_context_fini(d,s)                                        \
-       nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
-#define _nouveau_crypt_context_init _nouveau_engctx_init
-#define _nouveau_crypt_context_fini _nouveau_engctx_fini
-#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_crypt {
-       struct nouveau_engine base;
-};
-
-#define nouveau_crypt_create(p,e,c,d)                                          \
-       nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
-#define nouveau_crypt_destroy(d)                                               \
-       nouveau_engine_destroy(&(d)->base)
-#define nouveau_crypt_init(d)                                                  \
-       nouveau_engine_init(&(d)->base)
-#define nouveau_crypt_fini(d,s)                                                \
-       nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_dtor _nouveau_engine_dtor
-#define _nouveau_crypt_init _nouveau_engine_init
-#define _nouveau_crypt_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv84_crypt_oclass;
 extern struct nouveau_oclass nv98_crypt_oclass;
 
index 38ec125..4694828 100644 (file)
@@ -39,6 +39,11 @@ nouveau_disp(void *obj)
 
 extern struct nouveau_oclass nv04_disp_oclass;
 extern struct nouveau_oclass nv50_disp_oclass;
+extern struct nouveau_oclass nv84_disp_oclass;
+extern struct nouveau_oclass nva0_disp_oclass;
+extern struct nouveau_oclass nv94_disp_oclass;
+extern struct nouveau_oclass nva3_disp_oclass;
 extern struct nouveau_oclass nvd0_disp_oclass;
+extern struct nouveau_oclass nve0_disp_oclass;
 
 #endif
index 700ccbb..b28914e 100644 (file)
@@ -12,29 +12,17 @@ struct nouveau_dmaobj {
        u32 access;
        u64 start;
        u64 limit;
+       u32 conf0;
 };
 
-#define nouveau_dmaobj_create(p,e,c,a,s,d)                                     \
-       nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
-#define nouveau_dmaobj_destroy(p)                                              \
-       nouveau_object_destroy(&(p)->base)
-#define nouveau_dmaobj_init(p)                                                 \
-       nouveau_object_init(&(p)->base)
-#define nouveau_dmaobj_fini(p,s)                                               \
-       nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
-                          struct nouveau_oclass *, void *data, u32 size,
-                          int length, void **);
-
-#define _nouveau_dmaobj_dtor nouveau_object_destroy
-#define _nouveau_dmaobj_init nouveau_object_init
-#define _nouveau_dmaobj_fini nouveau_object_fini
-
 struct nouveau_dmaeng {
        struct nouveau_engine base;
-       int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
-                   struct nouveau_dmaobj *, struct nouveau_gpuobj **);
+
+       /* creates a "physical" dma object from a struct nouveau_dmaobj */
+       int (*bind)(struct nouveau_dmaeng *dmaeng,
+                   struct nouveau_object *parent,
+                   struct nouveau_dmaobj *dmaobj,
+                   struct nouveau_gpuobj **);
 };
 
 #define nouveau_dmaeng_create(p,e,c,d)                                         \
@@ -53,5 +41,8 @@ struct nouveau_dmaeng {
 extern struct nouveau_oclass nv04_dmaeng_oclass;
 extern struct nouveau_oclass nv50_dmaeng_oclass;
 extern struct nouveau_oclass nvc0_dmaeng_oclass;
+extern struct nouveau_oclass nvd0_dmaeng_oclass;
+
+extern struct nouveau_oclass nouveau_dmaobj_sclass[];
 
 #endif
index d67fed1..f18846c 100644 (file)
@@ -33,15 +33,15 @@ int  nouveau_fifo_channel_create_(struct nouveau_object *,
                                  struct nouveau_object *,
                                  struct nouveau_oclass *,
                                  int bar, u32 addr, u32 size, u32 push,
-                                 u32 engmask, int len, void **);
+                                 u64 engmask, int len, void **);
 void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
 
 #define _nouveau_fifo_channel_init _nouveau_namedb_init
 #define _nouveau_fifo_channel_fini _nouveau_namedb_fini
 
 void _nouveau_fifo_channel_dtor(struct nouveau_object *);
-u32  _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
-void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
+u32  _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
+void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
 
 struct nouveau_fifo_base {
        struct nouveau_gpuobj base;
index 74d554f..0a66781 100644 (file)
@@ -1,45 +1,7 @@
 #ifndef __NOUVEAU_PPP_H__
 #define __NOUVEAU_PPP_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_ppp_chan {
-       struct nouveau_engctx base;
-};
-
-#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d)                            \
-       nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_ppp_context_destroy(d)                                         \
-       nouveau_engctx_destroy(&(d)->base)
-#define nouveau_ppp_context_init(d)                                            \
-       nouveau_engctx_init(&(d)->base)
-#define nouveau_ppp_context_fini(d,s)                                          \
-       nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_ppp_context_init _nouveau_engctx_init
-#define _nouveau_ppp_context_fini _nouveau_engctx_fini
-#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_ppp {
-       struct nouveau_engine base;
-};
-
-#define nouveau_ppp_create(p,e,c,d)                                            \
-       nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
-#define nouveau_ppp_destroy(d)                                                 \
-       nouveau_engine_destroy(&(d)->base)
-#define nouveau_ppp_init(d)                                                    \
-       nouveau_engine_init(&(d)->base)
-#define nouveau_ppp_fini(d,s)                                                  \
-       nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_dtor _nouveau_engine_dtor
-#define _nouveau_ppp_init _nouveau_engine_init
-#define _nouveau_ppp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv98_ppp_oclass;
+extern struct nouveau_oclass nvc0_ppp_oclass;
 
 #endif
index 05cd08f..d7b287b 100644 (file)
@@ -1,45 +1,8 @@
 #ifndef __NOUVEAU_VP_H__
 #define __NOUVEAU_VP_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_vp_chan {
-       struct nouveau_engctx base;
-};
-
-#define nouveau_vp_context_create(p,e,c,g,s,a,f,d)                             \
-       nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_vp_context_destroy(d)                                          \
-       nouveau_engctx_destroy(&(d)->base)
-#define nouveau_vp_context_init(d)                                             \
-       nouveau_engctx_init(&(d)->base)
-#define nouveau_vp_context_fini(d,s)                                           \
-       nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_vp_context_init _nouveau_engctx_init
-#define _nouveau_vp_context_fini _nouveau_engctx_fini
-#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_vp {
-       struct nouveau_engine base;
-};
-
-#define nouveau_vp_create(p,e,c,d)                                             \
-       nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
-#define nouveau_vp_destroy(d)                                                  \
-       nouveau_engine_destroy(&(d)->base)
-#define nouveau_vp_init(d)                                                     \
-       nouveau_engine_init(&(d)->base)
-#define nouveau_vp_fini(d,s)                                                   \
-       nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_vp_dtor _nouveau_engine_dtor
-#define _nouveau_vp_init _nouveau_engine_init
-#define _nouveau_vp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv84_vp_oclass;
+extern struct nouveau_oclass nvc0_vp_oclass;
+extern struct nouveau_oclass nve0_vp_oclass;
 
 #endif
index d682fb6..b79025d 100644 (file)
@@ -23,6 +23,7 @@ struct dcb_output {
        uint8_t bus;
        uint8_t location;
        uint8_t or;
+       uint8_t link;
        bool duallink_possible;
        union {
                struct sor_conf {
@@ -55,36 +56,11 @@ struct dcb_output {
 
 u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
 u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
+u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *,
+                  struct dcb_output *);
+u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *,
+                  struct dcb_output *);
 int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
                     (struct nouveau_bios *, void *, int index, u16 entry));
 
-
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-static inline bool
-dcb_hash_match(struct dcb_output *dcb, u32 hash)
-{
-       if ((hash & 0x000000f0) != (dcb->location << 4))
-               return false;
-       if ((hash & 0x0000000f) != dcb->type)
-               return false;
-       if (!(hash & (dcb->or << 16)))
-               return false;
-
-       switch (dcb->type) {
-       case DCB_OUTPUT_TMDS:
-       case DCB_OUTPUT_LVDS:
-       case DCB_OUTPUT_DP:
-               if (hash & 0x00c00000) {
-                       if (!(hash & (dcb->sorconf.link << 22)))
-                               return false;
-               }
-       default:
-               return true;
-       }
-}
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
new file mode 100644 (file)
index 0000000..c35937e
--- /dev/null
@@ -0,0 +1,48 @@
+#ifndef __NVBIOS_DISP_H__
+#define __NVBIOS_DISP_H__
+
+u16 nvbios_disp_table(struct nouveau_bios *,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub);
+
+struct nvbios_disp {
+       u16 data;
+};
+
+u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx,
+                     u8 *ver, u8 *hdr__, u8 *sub);
+u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx,
+                     u8 *ver, u8 *hdr__, u8 *sub,
+                     struct nvbios_disp *);
+
+struct nvbios_outp {
+       u16 type;
+       u16 mask;
+       u16 script[3];
+};
+
+u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                     struct nvbios_outp *);
+u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                     struct nvbios_outp *);
+
+
+struct nvbios_ocfg {
+       u16 match;
+       u16 clkcmp[2];
+};
+
+u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                     struct nvbios_ocfg *);
+u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                     struct nvbios_ocfg *);
+u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz);
+
+#endif
index 73b5e5d..6e54218 100644 (file)
@@ -1,8 +1,34 @@
 #ifndef __NVBIOS_DP_H__
 #define __NVBIOS_DP_H__
 
-u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
-u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len);
+struct nvbios_dpout {
+       u16 type;
+       u16 mask;
+       u8  flags;
+       u32 script[5];
+       u32 lnkcmp;
+};
+
+u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx,
+                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                      struct nvbios_dpout *);
+u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
+                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                      struct nvbios_dpout *);
+
+struct nvbios_dpcfg {
+       u8 drv;
+       u8 pre;
+       u8 unk;
+};
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_dpcfg *);
+u16
+nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_dpcfg *);
 
 #endif
index 39e73b9..41b7a6a 100644 (file)
@@ -54,6 +54,7 @@ int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
                        int clk, struct nouveau_pll_vals *);
 int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
                        struct nouveau_pll_vals *);
-
+int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
+                       int clk, struct nouveau_pll_vals *);
 
 #endif
index 5c1b5e1..da470e6 100644 (file)
@@ -69,8 +69,11 @@ struct nouveau_fb {
                } type;
                u64 stolen;
                u64 size;
+
                int ranks;
+               int parts;
 
+               int  (*init)(struct nouveau_fb *);
                int  (*get)(struct nouveau_fb *, u64 size, u32 align,
                            u32 size_nc, u32 type, struct nouveau_mem **);
                void (*put)(struct nouveau_fb *, struct nouveau_mem **);
@@ -84,6 +87,8 @@ struct nouveau_fb {
                int regions;
                void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
                             u32 pitch, u32 flags, struct nouveau_fb_tile *);
+               void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+                            struct nouveau_fb_tile *);
                void (*fini)(struct nouveau_fb *, int i,
                             struct nouveau_fb_tile *);
                void (*prog)(struct nouveau_fb *, int i,
@@ -99,7 +104,7 @@ nouveau_fb(void *obj)
 
 #define nouveau_fb_create(p,e,c,d)                                             \
        nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
-int  nouveau_fb_created(struct nouveau_fb *);
+int  nouveau_fb_preinit(struct nouveau_fb *);
 void nouveau_fb_destroy(struct nouveau_fb *);
 int  nouveau_fb_init(struct nouveau_fb *);
 #define nouveau_fb_fini(p,s)                                                   \
@@ -111,9 +116,19 @@ int  _nouveau_fb_init(struct nouveau_object *);
 
 extern struct nouveau_oclass nv04_fb_oclass;
 extern struct nouveau_oclass nv10_fb_oclass;
+extern struct nouveau_oclass nv1a_fb_oclass;
 extern struct nouveau_oclass nv20_fb_oclass;
+extern struct nouveau_oclass nv25_fb_oclass;
 extern struct nouveau_oclass nv30_fb_oclass;
+extern struct nouveau_oclass nv35_fb_oclass;
+extern struct nouveau_oclass nv36_fb_oclass;
 extern struct nouveau_oclass nv40_fb_oclass;
+extern struct nouveau_oclass nv41_fb_oclass;
+extern struct nouveau_oclass nv44_fb_oclass;
+extern struct nouveau_oclass nv46_fb_oclass;
+extern struct nouveau_oclass nv47_fb_oclass;
+extern struct nouveau_oclass nv49_fb_oclass;
+extern struct nouveau_oclass nv4e_fb_oclass;
 extern struct nouveau_oclass nv50_fb_oclass;
 extern struct nouveau_oclass nvc0_fb_oclass;
 
@@ -122,13 +137,35 @@ int  nouveau_fb_bios_memtype(struct nouveau_bios *);
 
 bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
 
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+                      u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
 void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
 
+int  nv20_fb_vram_init(struct nouveau_fb *);
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+                      u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv30_fb_init(struct nouveau_object *);
 void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
                       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+                      struct nouveau_fb_tile *);
+
+int  nv41_fb_vram_init(struct nouveau_fb *);
+int  nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv44_fb_vram_init(struct nouveau_fb *);
+int  nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+                      u32 pitch, u32 flags, struct nouveau_fb_tile *);
 
 void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
-void nv50_fb_trap(struct nouveau_fb *, int display);
 
 #endif
index cd01c53..d70ba34 100644 (file)
@@ -65,14 +65,14 @@ nouveau_barobj_dtor(struct nouveau_object *object)
 }
 
 static u32
-nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_barobj_rd32(struct nouveau_object *object, u64 addr)
 {
        struct nouveau_barobj *barobj = (void *)object;
        return ioread32_native(barobj->iomem + addr);
 }
 
 static void
-nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        struct nouveau_barobj *barobj = (void *)object;
        iowrite32_native(data, barobj->iomem + addr);
index 70ca7d5..dd11194 100644 (file)
@@ -63,7 +63,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios)
        struct pci_dev *pdev = nv_device(bios)->pdev;
        struct device_node *dn;
        const u32 *data;
-       int size, i;
+       int size;
 
        dn = pci_device_to_OF_node(pdev);
        if (!dn) {
@@ -210,11 +210,19 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
                return;
 
        bios->data = kmalloc(bios->size, GFP_KERNEL);
-       for (i = 0; bios->data && i < bios->size; i += cnt) {
-               cnt = min((bios->size - i), (u32)4096);
-               ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
-               if (ret != cnt)
-                       break;
+       if (bios->data) {
+               /* disobey the acpi spec - much faster on at least w530 ... */
+               ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
+               if (ret != bios->size ||
+                   nvbios_checksum(bios->data, bios->size)) {
+                       /* ... that didn't work, ok, i'll be good now */
+                       for (i = 0; i < bios->size; i += cnt) {
+                               cnt = min((bios->size - i), (u32)4096);
+                               ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
+                               if (ret != cnt)
+                                       break;
+                       }
+               }
        }
 }
 
@@ -358,42 +366,42 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
 }
 
 static u8
-nouveau_bios_rd08(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd08(struct nouveau_object *object, u64 addr)
 {
        struct nouveau_bios *bios = (void *)object;
        return bios->data[addr];
 }
 
 static u16
-nouveau_bios_rd16(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd16(struct nouveau_object *object, u64 addr)
 {
        struct nouveau_bios *bios = (void *)object;
        return get_unaligned_le16(&bios->data[addr]);
 }
 
 static u32
-nouveau_bios_rd32(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd32(struct nouveau_object *object, u64 addr)
 {
        struct nouveau_bios *bios = (void *)object;
        return get_unaligned_le32(&bios->data[addr]);
 }
 
 static void
-nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data)
 {
        struct nouveau_bios *bios = (void *)object;
        bios->data[addr] = data;
 }
 
 static void
-nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data)
 {
        struct nouveau_bios *bios = (void *)object;
        put_unaligned_le16(data, &bios->data[addr]);
 }
 
 static void
-nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        struct nouveau_bios *bios = (void *)object;
        put_unaligned_le32(data, &bios->data[addr]);
index 7d75038..0fd87df 100644 (file)
@@ -64,7 +64,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
                }
        } else
        if (*ver >= 0x15) {
-               if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) {
+               if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
                        u16 i2c = nv_ro16(bios, dcb + 2);
                        *hdr = 4;
                        *cnt = (i2c - dcb) / 10;
@@ -107,6 +107,69 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
        return 0x0000;
 }
 
+u16
+dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+              struct dcb_output *outp)
+{
+       u16 dcb = dcb_outp(bios, idx, ver, len);
+       if (dcb) {
+               if (*ver >= 0x20) {
+                       u32 conn = nv_ro32(bios, dcb + 0x00);
+                       outp->or        = (conn & 0x0f000000) >> 24;
+                       outp->location  = (conn & 0x00300000) >> 20;
+                       outp->bus       = (conn & 0x000f0000) >> 16;
+                       outp->connector = (conn & 0x0000f000) >> 12;
+                       outp->heads     = (conn & 0x00000f00) >> 8;
+                       outp->i2c_index = (conn & 0x000000f0) >> 4;
+                       outp->type      = (conn & 0x0000000f);
+                       outp->link      = 0;
+               } else {
+                       dcb = 0x0000;
+               }
+
+               if (*ver >= 0x40) {
+                       u32 conf = nv_ro32(bios, dcb + 0x04);
+                       switch (outp->type) {
+                       case DCB_OUTPUT_TMDS:
+                       case DCB_OUTPUT_LVDS:
+                       case DCB_OUTPUT_DP:
+                               outp->link = (conf & 0x00000030) >> 4;
+                               outp->sorconf.link = outp->link; /*XXX*/
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+       return dcb;
+}
+
+static inline u16
+dcb_outp_hasht(struct dcb_output *outp)
+{
+       return outp->type;
+}
+
+static inline u16
+dcb_outp_hashm(struct dcb_output *outp)
+{
+       return (outp->heads << 8) | (outp->link << 6) | outp->or;
+}
+
+u16
+dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+              u8 *ver, u8 *len, struct dcb_output *outp)
+{
+       u16 dcb, idx = 0;
+       while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
+               if (dcb_outp_hasht(outp) == type) {
+                       if ((dcb_outp_hashm(outp) & mask) == mask)
+                               break;
+               }
+       }
+       return dcb;
+}
+
 int
 dcb_outp_foreach(struct nouveau_bios *bios, void *data,
                 int (*exec)(struct nouveau_bios *, void *, int, u16))
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
new file mode 100644 (file)
index 0000000..7f16e52
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/disp.h>
+
+u16
+nvbios_disp_table(struct nouveau_bios *bios,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub)
+{
+       struct bit_entry U;
+
+       if (!bit_entry(bios, 'U', &U)) {
+               if (U.version == 1) {
+                       u16 data = nv_ro16(bios, U.offset);
+                       if (data) {
+                               *ver = nv_ro08(bios, data + 0x00);
+                               switch (*ver) {
+                               case 0x20:
+                               case 0x21:
+                                       *hdr = nv_ro08(bios, data + 0x01);
+                                       *len = nv_ro08(bios, data + 0x02);
+                                       *cnt = nv_ro08(bios, data + 0x03);
+                                       *sub = nv_ro08(bios, data + 0x04);
+                                       return data;
+                               default:
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       return 0x0000;
+}
+
+u16
+nvbios_disp_entry(struct nouveau_bios *bios, u8 idx,
+                 u8 *ver, u8 *len, u8 *sub)
+{
+       u8  hdr, cnt;
+       u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub);
+       if (data && idx < cnt)
+               return data + hdr + (idx * *len);
+       *ver = 0x00;
+       return 0x0000;
+}
+
+u16
+nvbios_disp_parse(struct nouveau_bios *bios, u8 idx,
+                 u8 *ver, u8 *len, u8 *sub,
+                 struct nvbios_disp *info)
+{
+       u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
+       if (data && *len >= 2) {
+               info->data = nv_ro16(bios, data + 0);
+               return data;
+       }
+       return 0x0000;
+}
+
+u16
+nvbios_outp_entry(struct nouveau_bios *bios, u8 idx,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+       struct nvbios_disp info;
+       u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
+       if (data) {
+               *cnt = nv_ro08(bios, info.data + 0x05);
+               *len = 0x06;
+               data = info.data;
+       }
+       return data;
+}
+
+u16
+nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                 struct nvbios_outp *info)
+{
+       u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
+       if (data && *hdr >= 0x0a) {
+               info->type      = nv_ro16(bios, data + 0x00);
+               info->mask      = nv_ro32(bios, data + 0x02);
+               if (*ver <= 0x20) /* match any link */
+                       info->mask |= 0x00c0;
+               info->script[0] = nv_ro16(bios, data + 0x06);
+               info->script[1] = nv_ro16(bios, data + 0x08);
+               info->script[2] = 0x0000;
+               if (*hdr >= 0x0c)
+                       info->script[2] = nv_ro16(bios, data + 0x0a);
+               return data;
+       }
+       return 0x0000;
+}
+
+u16
+nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                 struct nvbios_outp *info)
+{
+       u16 data, idx = 0;
+       while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+               if (data && info->type == type) {
+                       if ((info->mask & mask) == mask)
+                               break;
+               }
+       }
+       return data;
+}
+
+u16
+nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+       if (idx < *cnt)
+               return outp + *hdr + (idx * *len);
+       return 0x0000;
+}
+
+u16
+nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                 struct nvbios_ocfg *info)
+{
+       u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+       if (data) {
+               info->match     = nv_ro16(bios, data + 0x00);
+               info->clkcmp[0] = nv_ro16(bios, data + 0x02);
+               info->clkcmp[1] = nv_ro16(bios, data + 0x04);
+       }
+       return data;
+}
+
+u16
+nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                 struct nvbios_ocfg *info)
+{
+       u16 data, idx = 0;
+       while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
+               if (info->match == type)
+                       break;
+       }
+       return data;
+}
+
+u16
+nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz)
+{
+       while (cmp) {
+               if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
+                       return  nv_ro16(bios, cmp + 0x02);
+               cmp += 0x04;
+       }
+       return 0x0000;
+}
index 3cbc0f3..663853b 100644 (file)
 
 #include "subdev/bios.h"
 #include "subdev/bios/bit.h"
-#include "subdev/bios/dcb.h"
 #include "subdev/bios/dp.h"
 
-u16
-dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+static u16
+nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
-       struct bit_entry bit_d;
+       struct bit_entry d;
 
-       if (!bit_entry(bios, 'd', &bit_d)) {
-               if (bit_d.version == 1) {
-                       u16 data = nv_ro16(bios, bit_d.offset);
+       if (!bit_entry(bios, 'd', &d)) {
+               if (d.version == 1 && d.length >= 2) {
+                       u16 data = nv_ro16(bios, d.offset);
                        if (data) {
-                               *ver = nv_ro08(bios, data + 0);
-                               *hdr = nv_ro08(bios, data + 1);
-                               *len = nv_ro08(bios, data + 2);
-                               *cnt = nv_ro08(bios, data + 3);
-                               return data;
+                               *ver = nv_ro08(bios, data + 0x00);
+                               switch (*ver) {
+                               case 0x21:
+                               case 0x30:
+                               case 0x40:
+                                       *hdr = nv_ro08(bios, data + 0x01);
+                                       *len = nv_ro08(bios, data + 0x02);
+                                       *cnt = nv_ro08(bios, data + 0x03);
+                                       return data;
+                               default:
+                                       break;
+                               }
                        }
                }
        }
@@ -49,28 +55,150 @@ dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
        return 0x0000;
 }
 
+static u16
+nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+       u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
+       if (data && idx < *cnt) {
+               u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
+               switch (*ver * !!outp) {
+               case 0x21:
+               case 0x30:
+                       *hdr = nv_ro08(bios, data + 0x04);
+                       *len = nv_ro08(bios, data + 0x05);
+                       *cnt = nv_ro08(bios, outp + 0x04);
+                       break;
+               case 0x40:
+                       *hdr = nv_ro08(bios, data + 0x04);
+                       *cnt = 0;
+                       *len = 0;
+                       break;
+               default:
+                       break;
+               }
+               return outp;
+       }
+       *ver = 0x00;
+       return 0x0000;
+}
+
 u16
-dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_dpout *info)
 {
-       u8  hdr, cnt;
-       u16 table = dp_table(bios, ver, &hdr, &cnt, len);
-       if (table && idx < cnt)
-               return nv_ro16(bios, table + hdr + (idx * *len));
-       return 0xffff;
+       u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
+       if (data && *ver) {
+               info->type = nv_ro16(bios, data + 0x00);
+               info->mask = nv_ro16(bios, data + 0x02);
+               switch (*ver) {
+               case 0x21:
+               case 0x30:
+                       info->flags     = nv_ro08(bios, data + 0x05);
+                       info->script[0] = nv_ro16(bios, data + 0x06);
+                       info->script[1] = nv_ro16(bios, data + 0x08);
+                       info->lnkcmp    = nv_ro16(bios, data + 0x0a);
+                       info->script[2] = nv_ro16(bios, data + 0x0c);
+                       info->script[3] = nv_ro16(bios, data + 0x0e);
+                       info->script[4] = nv_ro16(bios, data + 0x10);
+                       break;
+               case 0x40:
+                       info->flags     = nv_ro08(bios, data + 0x04);
+                       info->script[0] = nv_ro16(bios, data + 0x05);
+                       info->script[1] = nv_ro16(bios, data + 0x07);
+                       info->lnkcmp    = nv_ro16(bios, data + 0x09);
+                       info->script[2] = nv_ro16(bios, data + 0x0b);
+                       info->script[3] = nv_ro16(bios, data + 0x0d);
+                       info->script[4] = nv_ro16(bios, data + 0x0f);
+                       break;
+               default:
+                       data = 0x0000;
+                       break;
+               }
+       }
+       return data;
 }
 
 u16
-dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp,
-             u8 *ver, u8 *len)
+nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_dpout *info)
 {
-       u8  idx = 0;
-       u16 data;
-       while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) {
-               if (data) {
-                       u32 hash = nv_ro32(bios, data);
-                       if (dcb_hash_match(outp, hash))
-                               return data;
+       u16 data, idx = 0;
+       while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+               if (data && info->type == type) {
+                       if ((info->mask & mask) == mask)
+                               break;
                }
        }
+       return data;
+}
+
+static u16
+nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+       if (*ver >= 0x40) {
+               outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
+               *hdr = *hdr + (*len * * cnt);
+               *len = nv_ro08(bios, outp + 0x06);
+               *cnt = nv_ro08(bios, outp + 0x07);
+       }
+
+       if (idx < *cnt)
+               return outp + *hdr + (idx * *len);
+
        return 0x0000;
 }
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_dpcfg *info)
+{
+       u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+       if (data) {
+               switch (*ver) {
+               case 0x21:
+                       info->drv = nv_ro08(bios, data + 0x02);
+                       info->pre = nv_ro08(bios, data + 0x03);
+                       info->unk = nv_ro08(bios, data + 0x04);
+                       break;
+               case 0x30:
+               case 0x40:
+                       info->drv = nv_ro08(bios, data + 0x01);
+                       info->pre = nv_ro08(bios, data + 0x02);
+                       info->unk = nv_ro08(bios, data + 0x03);
+                       break;
+               default:
+                       data = 0x0000;
+                       break;
+               }
+       }
+       return data;
+}
+
+u16
+nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_dpcfg *info)
+{
+       u8 idx = 0xff;
+       u16 data;
+
+       if (*ver >= 0x30) {
+               const u8 vsoff[] = { 0, 4, 7, 9 };
+               idx = (un * 10) + vsoff[vs] + pe;
+       } else {
+               while ((data = nvbios_dpcfg_entry(bios, outp, idx,
+                                                 ver, hdr, cnt, len))) {
+                       if (nv_ro08(bios, data + 0x00) == vs &&
+                           nv_ro08(bios, data + 0x01) == pe)
+                               break;
+                       idx++;
+               }
+       }
+
+       return nvbios_dpcfg_parse(bios, outp, pe, ver, hdr, cnt, len, info);
+}
index 4c9f1e5..c90d4aa 100644 (file)
@@ -101,8 +101,8 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
        }
 
        /* DCB 2.2, fixed TVDAC GPIO data */
-       if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) {
-               if (func == DCB_GPIO_TVDAC0) {
+       if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len))) {
+               if (ver >= 0x22 && ver < 0x30 && func == DCB_GPIO_TVDAC0) {
                        u8 conf = nv_ro08(bios, entry - 5);
                        u8 addr = nv_ro08(bios, entry - 4);
                        if (conf & 0x01) {
index 6be8c32..ae168bb 100644 (file)
@@ -743,9 +743,10 @@ static void
 init_dp_condition(struct nvbios_init *init)
 {
        struct nouveau_bios *bios = init->bios;
+       struct nvbios_dpout info;
        u8  cond = nv_ro08(bios, init->offset + 1);
        u8  unkn = nv_ro08(bios, init->offset + 2);
-       u8  ver, len;
+       u8  ver, hdr, cnt, len;
        u16 data;
 
        trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
@@ -759,10 +760,12 @@ init_dp_condition(struct nvbios_init *init)
        case 1:
        case 2:
                if ( init->outp &&
-                   (data = dp_outp_match(bios, init->outp, &ver, &len))) {
-                       if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond))
-                               init_exec_set(init, false);
-                       if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond))
+                   (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
+                                              (init->outp->or << 0) |
+                                              (init->outp->sorconf.link << 6),
+                                              &ver, &hdr, &cnt, &len, &info)))
+               {
+                       if (!(info.flags & cond))
                                init_exec_set(init, false);
                        break;
                }
index cc8d7d1..9068c98 100644 (file)
@@ -66,6 +66,24 @@ nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
        return ret;
 }
 
+int
+nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
+                   int clk, struct nouveau_pll_vals *pv)
+{
+       int ret, N, M, P;
+
+       ret = nva3_pll_calc(clock, info, clk, &N, NULL, &M, &P);
+
+       if (ret > 0) {
+               pv->refclk = info->refclk;
+               pv->N1 = N;
+               pv->M1 = M;
+               pv->log2P = P;
+       }
+       return ret;
+}
+
+
 static int
 nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                struct nouveau_oclass *oclass, void *data, u32 size,
@@ -80,6 +98,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                return ret;
 
        priv->base.pll_set = nva3_clock_pll_set;
+       priv->base.pll_calc = nva3_clock_pll_calc;
        return 0;
 }
 
index 5ccce0b..f6962c9 100644 (file)
@@ -79,6 +79,7 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                return ret;
 
        priv->base.pll_set = nvc0_clock_pll_set;
+       priv->base.pll_calc = nva3_clock_pll_calc;
        return 0;
 }
 
index ca9a464..f8a7ed4 100644 (file)
@@ -25,7 +25,6 @@
 #include <core/object.h>
 #include <core/device.h>
 #include <core/client.h>
-#include <core/device.h>
 #include <core/option.h>
 
 #include <core/class.h>
@@ -61,19 +60,24 @@ struct nouveau_devobj {
 
 static const u64 disable_map[] = {
        [NVDEV_SUBDEV_VBIOS]    = NV_DEVICE_DISABLE_VBIOS,
+       [NVDEV_SUBDEV_DEVINIT]  = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_GPIO]     = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_I2C]      = NV_DEVICE_DISABLE_CORE,
-       [NVDEV_SUBDEV_DEVINIT]  = NV_DEVICE_DISABLE_CORE,
+       [NVDEV_SUBDEV_CLOCK]    = NV_DEVICE_DISABLE_CORE,
+       [NVDEV_SUBDEV_MXM]      = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_MC]       = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_TIMER]    = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_FB]       = NV_DEVICE_DISABLE_CORE,
-       [NVDEV_SUBDEV_VM]       = NV_DEVICE_DISABLE_CORE,
+       [NVDEV_SUBDEV_LTCG]     = NV_DEVICE_DISABLE_CORE,
+       [NVDEV_SUBDEV_IBUS]     = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_INSTMEM]  = NV_DEVICE_DISABLE_CORE,
+       [NVDEV_SUBDEV_VM]       = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_BAR]      = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_VOLT]     = NV_DEVICE_DISABLE_CORE,
-       [NVDEV_SUBDEV_CLOCK]    = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_THERM]    = NV_DEVICE_DISABLE_CORE,
        [NVDEV_ENGINE_DMAOBJ]   = NV_DEVICE_DISABLE_CORE,
+       [NVDEV_ENGINE_FIFO]     = NV_DEVICE_DISABLE_FIFO,
+       [NVDEV_ENGINE_SW]       = NV_DEVICE_DISABLE_FIFO,
        [NVDEV_ENGINE_GR]       = NV_DEVICE_DISABLE_GRAPH,
        [NVDEV_ENGINE_MPEG]     = NV_DEVICE_DISABLE_MPEG,
        [NVDEV_ENGINE_ME]       = NV_DEVICE_DISABLE_ME,
@@ -84,7 +88,7 @@ static const u64 disable_map[] = {
        [NVDEV_ENGINE_COPY0]    = NV_DEVICE_DISABLE_COPY0,
        [NVDEV_ENGINE_COPY1]    = NV_DEVICE_DISABLE_COPY1,
        [NVDEV_ENGINE_UNK1C1]   = NV_DEVICE_DISABLE_UNK1C1,
-       [NVDEV_ENGINE_FIFO]     = NV_DEVICE_DISABLE_FIFO,
+       [NVDEV_ENGINE_VENC]     = NV_DEVICE_DISABLE_VENC,
        [NVDEV_ENGINE_DISP]     = NV_DEVICE_DISABLE_DISP,
        [NVDEV_SUBDEV_NR]       = 0,
 };
@@ -208,7 +212,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
 
                /* determine frequency of timing crystal */
                if ( device->chipset < 0x17 ||
-                   (device->chipset >= 0x20 && device->chipset <= 0x25))
+                   (device->chipset >= 0x20 && device->chipset < 0x25))
                        strap &= 0x00000040;
                else
                        strap &= 0x00400040;
@@ -356,37 +360,37 @@ fail:
 }
 
 static u8
-nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
 {
        return nv_rd08(object->engine, addr);
 }
 
 static u16
-nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
 {
        return nv_rd16(object->engine, addr);
 }
 
 static u32
-nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
 {
        return nv_rd32(object->engine, addr);
 }
 
 static void
-nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
 {
        nv_wr08(object->engine, addr, data);
 }
 
 static void
-nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
 {
        nv_wr16(object->engine, addr, data);
 }
 
 static void
-nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        nv_wr32(object->engine, addr, data);
 }
index f09accf..9c40b0f 100644 (file)
@@ -105,7 +105,7 @@ nv10_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -159,7 +159,7 @@ nv10_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
index 5fa58b7..74f88f4 100644 (file)
@@ -72,7 +72,7 @@ nv20_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -90,7 +90,7 @@ nv20_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -108,7 +108,7 @@ nv20_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
index 7f4b8fe..0ac1b2c 100644 (file)
@@ -72,7 +72,7 @@ nv30_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv35_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -109,7 +109,7 @@ nv30_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv36_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -128,7 +128,7 @@ nv30_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
index 42deadc..41d5968 100644 (file)
@@ -76,7 +76,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -96,7 +96,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -156,7 +156,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv47_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -176,7 +176,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -196,7 +196,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -216,7 +216,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -236,7 +236,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -256,7 +256,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -276,7 +276,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -296,7 +296,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv4e_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -316,7 +316,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -336,7 +336,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,7 +356,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
index fec3bcc..6ccfd85 100644 (file)
@@ -98,7 +98,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
                break;
        case 0x86:
                device->cname = "G86";
@@ -123,7 +123,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
                break;
        case 0x92:
                device->cname = "G92";
@@ -148,7 +148,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
                break;
        case 0x94:
                device->cname = "G94";
@@ -173,7 +173,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
                break;
        case 0x96:
                device->cname = "G96";
@@ -198,7 +198,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
                break;
        case 0x98:
                device->cname = "G98";
@@ -223,7 +223,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
                break;
        case 0xa0:
                device->cname = "G200";
@@ -248,7 +248,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva0_disp_oclass;
                break;
        case 0xaa:
                device->cname = "MCP77/MCP78";
@@ -273,7 +273,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
                break;
        case 0xac:
                device->cname = "MCP79/MCP7A";
@@ -298,7 +298,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
                break;
        case 0xa3:
                device->cname = "GT215";
@@ -324,7 +324,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xa5:
                device->cname = "GT216";
@@ -349,7 +349,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xa8:
                device->cname = "GT218";
@@ -374,7 +374,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xaf:
                device->cname = "MCP89";
@@ -399,7 +399,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        default:
                nv_fatal(device, "unknown Tesla chipset\n");
index 6697f0f..f046168 100644 (file)
@@ -74,12 +74,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xc4:
                device->cname = "GF104";
@@ -102,12 +102,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xc3:
                device->cname = "GF106";
@@ -130,12 +130,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xce:
                device->cname = "GF114";
@@ -158,12 +158,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xcf:
                device->cname = "GF116";
@@ -186,12 +186,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xc1:
                device->cname = "GF108";
@@ -214,12 +214,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xc8:
                device->cname = "GF110";
@@ -242,12 +242,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xd9:
                device->cname = "GF119";
@@ -266,13 +266,13 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
-               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
                break;
index 4a280b7..9b7881e 100644 (file)
@@ -45,6 +45,9 @@
 #include <engine/graph.h>
 #include <engine/disp.h>
 #include <engine/copy.h>
+#include <engine/bsp.h>
+#include <engine/vp.h>
+#include <engine/ppp.h>
 
 int
 nve0_identify(struct nouveau_device *device)
@@ -67,13 +70,16 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
-               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                break;
        case 0xe7:
                device->cname = "GK107";
@@ -92,13 +98,16 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
-               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                break;
        default:
                nv_fatal(device, "unknown Kepler chipset\n");
index 61becfa..ae7249b 100644 (file)
  * Authors: Ben Skeggs
  */
 
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
 #include <subdev/devinit.h>
 #include <subdev/vga.h>
 
@@ -55,7 +59,12 @@ nv50_devinit_dtor(struct nouveau_object *object)
 static int
 nv50_devinit_init(struct nouveau_object *object)
 {
+       struct nouveau_bios *bios = nouveau_bios(object);
        struct nv50_devinit_priv *priv = (void *)object;
+       struct nvbios_outp info;
+       struct dcb_output outp;
+       u8  ver = 0xff, hdr, cnt, len;
+       int ret, i = 0;
 
        if (!priv->base.post) {
                if (!nv_rdvgac(priv, 0, 0x00) &&
@@ -65,7 +74,30 @@ nv50_devinit_init(struct nouveau_object *object)
                }
        }
 
-       return nouveau_devinit_init(&priv->base);
+       ret = nouveau_devinit_init(&priv->base); /* run common devinit (VBIOS init tables) first */
+       if (ret)
+               return ret;
+
+       /* if we ran the init tables, execute first script pointer for each
+        * display table output entry that has a matching dcb entry.
+        */
+       while (priv->base.post && ver) { /* loop ends when nvbios_outp_parse clears ver -- TODO confirm */
+               u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
+               if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
+                       struct nvbios_init init = {
+                               .subdev = nv_subdev(priv),
+                               .bios = bios,
+                               .offset = info.script[0], /* first script pointer only */
+                               .outp = &outp,
+                               .crtc = -1, /* presumably "no specific crtc" -- verify */
+                               .execute = 1,
+                       };
+
+                       nvbios_exec(&init);
+               }
+       }; /* NOTE(review): stray ';' after while block -- harmless empty statement */
+
+       return 0;
 }
 
 static int
index f0086de..d6d1600 100644 (file)
@@ -57,25 +57,45 @@ nouveau_fb_bios_memtype(struct nouveau_bios *bios)
 }
 
 int
-nouveau_fb_init(struct nouveau_fb *pfb)
+nouveau_fb_preinit(struct nouveau_fb *pfb)
 {
-       int ret, i;
+       static const char *name[] = {
+               [NV_MEM_TYPE_UNKNOWN] = "unknown",
+               [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
+               [NV_MEM_TYPE_SGRAM  ] = "SGRAM",
+               [NV_MEM_TYPE_SDRAM  ] = "SDRAM",
+               [NV_MEM_TYPE_DDR1   ] = "DDR1",
+               [NV_MEM_TYPE_DDR2   ] = "DDR2",
+               [NV_MEM_TYPE_DDR3   ] = "DDR3",
+               [NV_MEM_TYPE_GDDR2  ] = "GDDR2",
+               [NV_MEM_TYPE_GDDR3  ] = "GDDR3",
+               [NV_MEM_TYPE_GDDR4  ] = "GDDR4",
+               [NV_MEM_TYPE_GDDR5  ] = "GDDR5",
+       };
+       int ret, tags;
 
-       ret = nouveau_subdev_init(&pfb->base);
-       if (ret)
-               return ret;
+       tags = pfb->ram.init(pfb);
+       if (tags < 0 || !pfb->ram.size) {
+               nv_fatal(pfb, "error detecting memory configuration!!\n");
+               return (tags < 0) ? tags : -ERANGE;
+       }
 
-       for (i = 0; i < pfb->tile.regions; i++)
-               pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
+       if (!nouveau_mm_initialised(&pfb->vram)) {
+               ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1);
+               if (ret)
+                       return ret;
+       }
 
-       return 0;
-}
+       if (!nouveau_mm_initialised(&pfb->tags) && tags) {
+               ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+               if (ret)
+                       return ret;
+       }
 
-int
-_nouveau_fb_init(struct nouveau_object *object)
-{
-       struct nouveau_fb *pfb = (void *)object;
-       return nouveau_fb_init(pfb);
+       nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
+       nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
+       nv_info(pfb, "   ZCOMP: %d tags\n", tags);
+       return 0;
 }
 
 void
@@ -85,12 +105,8 @@ nouveau_fb_destroy(struct nouveau_fb *pfb)
 
        for (i = 0; i < pfb->tile.regions; i++)
                pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
-
-       if (pfb->tags.block_size)
-               nouveau_mm_fini(&pfb->tags);
-
-       if (pfb->vram.block_size)
-               nouveau_mm_fini(&pfb->vram);
+       nouveau_mm_fini(&pfb->tags);
+       nouveau_mm_fini(&pfb->vram);
 
        nouveau_subdev_destroy(&pfb->base);
 }
@@ -101,30 +117,24 @@ _nouveau_fb_dtor(struct nouveau_object *object)
        struct nouveau_fb *pfb = (void *)object;
        nouveau_fb_destroy(pfb);
 }
-
 int
-nouveau_fb_created(struct nouveau_fb *pfb)
+nouveau_fb_init(struct nouveau_fb *pfb)
 {
-       static const char *name[] = {
-               [NV_MEM_TYPE_UNKNOWN] = "unknown",
-               [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
-               [NV_MEM_TYPE_SGRAM  ] = "SGRAM",
-               [NV_MEM_TYPE_SDRAM  ] = "SDRAM",
-               [NV_MEM_TYPE_DDR1   ] = "DDR1",
-               [NV_MEM_TYPE_DDR2   ] = "DDR2",
-               [NV_MEM_TYPE_DDR3   ] = "DDR3",
-               [NV_MEM_TYPE_GDDR2  ] = "GDDR2",
-               [NV_MEM_TYPE_GDDR3  ] = "GDDR3",
-               [NV_MEM_TYPE_GDDR4  ] = "GDDR4",
-               [NV_MEM_TYPE_GDDR5  ] = "GDDR5",
-       };
+       int ret, i;
 
-       if (pfb->ram.size == 0) {
-               nv_fatal(pfb, "no vram detected!!\n");
-               return -ERANGE;
-       }
+       ret = nouveau_subdev_init(&pfb->base);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < pfb->tile.regions; i++)
+               pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
 
-       nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
-       nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
        return 0;
 }
+
+int
+_nouveau_fb_init(struct nouveau_object *object)
+{
+       struct nouveau_fb *pfb = (void *)object;
+       return nouveau_fb_init(pfb);
+}
index eb06836..6e369f8 100644 (file)
@@ -56,6 +56,37 @@ nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 }
 
 static int
+nv04_fb_vram_init(struct nouveau_fb *pfb)
+{
+       u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0); /* boot-time RAM configuration straps */
+       if (boot0 & 0x00000100) {
+               pfb->ram.size  = ((boot0 >> 12) & 0xf) * 2 + 2; /* MiB count from bits 15:12 -- TODO confirm encoding */
+               pfb->ram.size *= 1024 * 1024;
+       } else {
+               switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+               case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+                       pfb->ram.size = 32 * 1024 * 1024;
+                       break;
+               case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+                       pfb->ram.size = 16 * 1024 * 1024;
+                       break;
+               case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+                       pfb->ram.size = 8 * 1024 * 1024;
+                       break;
+               case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+                       pfb->ram.size = 4 * 1024 * 1024;
+                       break;
+               }
+       } /* unhandled encodings leave ram.size at 0; nouveau_fb_preinit treats that as fatal */
+
+       if ((boot0 & 0x00000038) <= 0x10) /* memory type strap: SGRAM vs SDRAM */
+               pfb->ram.type = NV_MEM_TYPE_SGRAM;
+       else
+               pfb->ram.type = NV_MEM_TYPE_SDRAM;
+       return 0; /* no ZCOMP tags on nv04 */
+}
+
+static int
 nv04_fb_init(struct nouveau_object *object)
 {
        struct nv04_fb_priv *priv = (void *)object;
@@ -79,7 +110,6 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_object **pobject)
 {
        struct nv04_fb_priv *priv;
-       u32 boot0;
        int ret;
 
        ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -87,35 +117,9 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       boot0 = nv_rd32(priv, NV04_PFB_BOOT_0);
-       if (boot0 & 0x00000100) {
-               priv->base.ram.size  = ((boot0 >> 12) & 0xf) * 2 + 2;
-               priv->base.ram.size *= 1024 * 1024;
-       } else {
-               switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
-               case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
-                       priv->base.ram.size = 32 * 1024 * 1024;
-                       break;
-               case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
-                       priv->base.ram.size = 16 * 1024 * 1024;
-                       break;
-               case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
-                       priv->base.ram.size = 8 * 1024 * 1024;
-                       break;
-               case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
-                       priv->base.ram.size = 4 * 1024 * 1024;
-                       break;
-               }
-       }
-
-       if ((boot0 & 0x00000038) <= 0x10)
-               priv->base.ram.type = NV_MEM_TYPE_SGRAM;
-       else
-               priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
-
        priv->base.memtype_valid = nv04_fb_memtype_valid;
-       return nouveau_fb_created(&priv->base);
+       priv->base.ram.init = nv04_fb_vram_init;
+       return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
index f037a42..edbbe26 100644 (file)
@@ -30,7 +30,20 @@ struct nv10_fb_priv {
        struct nouveau_fb base;
 };
 
-static void
+static int
+nv10_fb_vram_init(struct nouveau_fb *pfb)
+{
+       u32 cfg0 = nv_rd32(pfb, 0x100200); /* bit 0 of 0x100200 selects DDR1 vs SDRAM */
+       if (cfg0 & 0x00000001)
+               pfb->ram.type = NV_MEM_TYPE_DDR1;
+       else
+               pfb->ram.type = NV_MEM_TYPE_SDRAM;
+
+       pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; /* size field in the high byte of 0x10020c */
+       return 0; /* no ZCOMP tags on nv10 */
+}
+
+void
 nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
                  u32 flags, struct nouveau_fb_tile *tile)
 {
@@ -39,7 +52,7 @@ nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
        tile->pitch = pitch;
 }
 
-static void
+void
 nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 {
        tile->addr  = 0;
@@ -54,6 +67,7 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
        nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
        nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
        nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+       nv_rd32(pfb, 0x100240 + (i * 0x10));
 }
 
 static int
@@ -61,7 +75,6 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_oclass *oclass, void *data, u32 size,
             struct nouveau_object **pobject)
 {
-       struct nouveau_device *device = nv_device(parent);
        struct nv10_fb_priv *priv;
        int ret;
 
@@ -70,42 +83,13 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       if (device->chipset == 0x1a ||  device->chipset == 0x1f) {
-               struct pci_dev *bridge;
-               u32 mem, mib;
-
-               bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
-               if (!bridge) {
-                       nv_fatal(device, "no bridge device\n");
-                       return 0;
-               }
-
-               if (device->chipset == 0x1a) {
-                       pci_read_config_dword(bridge, 0x7c, &mem);
-                       mib = ((mem >> 6) & 31) + 1;
-               } else {
-                       pci_read_config_dword(bridge, 0x84, &mem);
-                       mib = ((mem >> 4) & 127) + 1;
-               }
-
-               priv->base.ram.type = NV_MEM_TYPE_STOLEN;
-               priv->base.ram.size = mib * 1024 * 1024;
-       } else {
-               u32 cfg0 = nv_rd32(priv, 0x100200);
-               if (cfg0 & 0x00000001)
-                       priv->base.ram.type = NV_MEM_TYPE_DDR1;
-               else
-                       priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
-               priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-       }
-
        priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv10_fb_vram_init;
        priv->base.tile.regions = 8;
        priv->base.tile.init = nv10_fb_tile_init;
        priv->base.tile.fini = nv10_fb_tile_fini;
        priv->base.tile.prog = nv10_fb_tile_prog;
-       return nouveau_fb_created(&priv->base);
+       return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
new file mode 100644 (file)
index 0000000..4836684
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv1a_fb_priv {
+       struct nouveau_fb base;
+};
+
+static int
+nv1a_fb_vram_init(struct nouveau_fb *pfb)
+{
+       struct pci_dev *bridge;
+       u32 mem, mib;
+
+       bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); /* host bridge 00:00.1 -- presumably nForce IGP, holds stolen-memory size */
+       if (!bridge) {
+               nv_fatal(pfb, "no bridge device\n");
+               return -ENODEV;
+       }
+
+       if (nv_device(pfb)->chipset == 0x1a) { /* 0x1a and 0x1f read the size from different config registers */
+               pci_read_config_dword(bridge, 0x7c, &mem);
+               mib = ((mem >> 6) & 31) + 1;
+       } else {
+               pci_read_config_dword(bridge, 0x84, &mem);
+               mib = ((mem >> 4) & 127) + 1;
+       }
+
+       pfb->ram.type = NV_MEM_TYPE_STOLEN; /* IGP: "VRAM" is stolen system memory */
+       pfb->ram.size = mib * 1024 * 1024;
+       return 0; /* no ZCOMP tags */
+}
+
+static int /* constructor: wires nv1a-specific vram detection into the shared nv04/nv10 fb hooks */
+nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv1a_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv); /* publish object even on error so caller can clean up */
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv1a_fb_vram_init;
+       priv->base.tile.regions = 8;
+       priv->base.tile.init = nv10_fb_tile_init;
+       priv->base.tile.fini = nv10_fb_tile_fini;
+       priv->base.tile.prog = nv10_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base); /* detects RAM and sets up vram/tag mm before subdev init */
+}
+
+struct nouveau_oclass
+nv1a_fb_oclass = { /* FB subdev class for chipset 0x1a */
+       .handle = NV_SUBDEV(FB, 0x1a),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv1a_fb_ctor,
+               .dtor = _nouveau_fb_dtor, /* shared base-class dtor/init/fini */
+               .init = _nouveau_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
index 4b3578f..5d14612 100644 (file)
@@ -30,43 +30,54 @@ struct nv20_fb_priv {
        struct nouveau_fb base;
 };
 
-static void
+int /* non-static: also used by the nv25/nv30 ctors */
+nv20_fb_vram_init(struct nouveau_fb *pfb)
+{
+       u32 pbus1218 = nv_rd32(pfb, 0x001218); /* bits 9:8 encode the RAM type */
+
+       switch (pbus1218 & 0x00000300) {
+       case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+       case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+       case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+       case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break;
+       }
+       pfb->ram.size  = (nv_rd32(pfb, 0x10020c) & 0xff000000);
+       pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; /* memory partition count, used by tile_comp */
+
+       return nv_rd32(pfb, 0x100320); /* ZCOMP tag count, consumed by nouveau_fb_preinit */
+}
+
+void
 nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
                  u32 flags, struct nouveau_fb_tile *tile)
 {
-       struct nouveau_device *device = nv_device(pfb);
-       int bpp = (flags & 2) ? 32 : 16;
-
        tile->addr  = 0x00000001 | addr;
        tile->limit = max(1u, addr + size) - 1;
        tile->pitch = pitch;
-
-       /* Allocate some of the on-die tag memory, used to store Z
-        * compression meta-data (most likely just a bitmap determining
-        * if a given tile is compressed or not).
-        */
-       size /= 256;
        if (flags & 4) {
-               if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) {
-                       /* Enable Z compression */
-                       tile->zcomp = tile->tag->offset;
-                       if (device->chipset >= 0x25) {
-                               if (bpp == 16)
-                                       tile->zcomp |= 0x00100000;
-                               else
-                                       tile->zcomp |= 0x00200000;
-                       } else {
-                               tile->zcomp |= 0x80000000;
-                               if (bpp != 16)
-                                       tile->zcomp |= 0x04000000;
-                       }
-               }
-
+               pfb->tile.comp(pfb, i, size, flags, tile);
                tile->addr |= 2;
        }
 }
 
 static void
+nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+                 struct nouveau_fb_tile *tile)
+{
+       u32 tiles = DIV_ROUND_UP(size, 0x40); /* one tag per 0x40 bytes */
+       u32 tags  = round_up(tiles / pfb->ram.parts, 0x40); /* rounded per memory partition -- confirm hw requirement */
+       if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { /* on alloc failure, tile stays uncompressed */
+               if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
+               else              tile->zcomp = 0x04000000; /* Z24S8 */
+               tile->zcomp |= tile->tag->offset;
+               tile->zcomp |= 0x80000000; /* enable */
+#ifdef __BIG_ENDIAN
+               tile->zcomp |= 0x08000000;
+#endif
+       }
+}
+
+void
 nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 {
        tile->addr  = 0;
@@ -76,12 +87,13 @@ nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
        nouveau_mm_free(&pfb->tags, &tile->tag);
 }
 
-static void
+void
 nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 {
        nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
        nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
        nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+       nv_rd32(pfb, 0x100240 + (i * 0x10));
        nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
 }
 
@@ -90,9 +102,7 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_oclass *oclass, void *data, u32 size,
             struct nouveau_object **pobject)
 {
-       struct nouveau_device *device = nv_device(parent);
        struct nv20_fb_priv *priv;
-       u32 pbus1218;
        int ret;
 
        ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -100,28 +110,14 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       pbus1218 = nv_rd32(priv, 0x001218);
-       switch (pbus1218 & 0x00000300) {
-       case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
-       case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-       case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-       case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
-       }
-       priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
-       if (device->chipset >= 0x25)
-               ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1);
-       else
-               ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1);
-       if (ret)
-               return ret;
-
        priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv20_fb_vram_init;
        priv->base.tile.regions = 8;
        priv->base.tile.init = nv20_fb_tile_init;
+       priv->base.tile.comp = nv20_fb_tile_comp;
        priv->base.tile.fini = nv20_fb_tile_fini;
        priv->base.tile.prog = nv20_fb_tile_prog;
-       return nouveau_fb_created(&priv->base);
+       return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
new file mode 100644 (file)
index 0000000..0042ace
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv25_fb_priv {
+       struct nouveau_fb base;
+};
+
+static void
+nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+                 struct nouveau_fb_tile *tile) /* nv25 variant: format/endian bits differ from nv20; no explicit enable bit */
+{
+       u32 tiles = DIV_ROUND_UP(size, 0x40); /* one tag per 0x40 bytes */
+       u32 tags  = round_up(tiles / pfb->ram.parts, 0x40); /* rounded per memory partition */
+       if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { /* on alloc failure, tile stays uncompressed */
+               if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
+               else              tile->zcomp = 0x00200000; /* Z24S8 */
+               tile->zcomp |= tile->tag->offset;
+#ifdef __BIG_ENDIAN
+               tile->zcomp |= 0x01000000;
+#endif
+       }
+}
+
+static int /* constructor: nv20 detection/tiling hooks with the nv25-specific zcomp variant */
+nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv25_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv); /* publish object even on error so caller can clean up */
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv20_fb_vram_init;
+       priv->base.tile.regions = 8;
+       priv->base.tile.init = nv20_fb_tile_init;
+       priv->base.tile.comp = nv25_fb_tile_comp; /* only hook that differs from nv20 */
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv20_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base); /* detects RAM and sets up vram/tag mm before subdev init */
+}
+
+struct nouveau_oclass
+nv25_fb_oclass = { /* FB subdev class for chipset 0x25 */
+       .handle = NV_SUBDEV(FB, 0x25),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv25_fb_ctor,
+               .dtor = _nouveau_fb_dtor, /* shared base-class dtor/init/fini */
+               .init = _nouveau_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
index cba67bc..a7ba0d0 100644 (file)
@@ -34,17 +34,36 @@ void
 nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
                  u32 flags, struct nouveau_fb_tile *tile)
 {
-       tile->addr = addr | 1;
+       /* for performance, select alternate bank offset for zeta */
+       if (!(flags & 4)) {
+               tile->addr = (0 << 4);
+       } else {
+               if (pfb->tile.comp) /* z compression */
+                       pfb->tile.comp(pfb, i, size, flags, tile);
+               tile->addr = (1 << 4);
+       }
+
+       tile->addr |= 0x00000001; /* enable */
+       tile->addr |= addr;
        tile->limit = max(1u, addr + size) - 1;
        tile->pitch = pitch;
 }
 
-void
-nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static void
+nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+                 struct nouveau_fb_tile *tile)
 {
-       tile->addr  = 0;
-       tile->limit = 0;
-       tile->pitch = 0;
+       u32 tiles = DIV_ROUND_UP(size, 0x40); /* one tag per 0x40 bytes */
+       u32 tags  = round_up(tiles / pfb->ram.parts, 0x40); /* rounded per memory partition */
+       if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { /* on alloc failure, tile stays uncompressed */
+               if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */ /* NOTE(review): flag sense inverted vs nv20/nv25 (!(flags & 2) -> Z16 there) -- verify */
+               else           tile->zcomp |= 0x02000000; /* Z24S8 */
+               tile->zcomp |= ((tile->tag->offset           ) >> 6); /* NOTE(review): |= assumes zcomp starts zeroed -- verify tile_fini clears it */
+               tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12; /* end-of-range tag in the <<12 field -- confirm layout */
+#ifdef __BIG_ENDIAN
+               tile->zcomp |= 0x10000000;
+#endif
+       }
 }
 
 static int
@@ -72,7 +91,7 @@ calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
        return x;
 }
 
-static int
+int
 nv30_fb_init(struct nouveau_object *object)
 {
        struct nouveau_device *device = nv_device(object);
@@ -111,7 +130,6 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_object **pobject)
 {
        struct nv30_fb_priv *priv;
-       u32 pbus1218;
        int ret;
 
        ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -119,21 +137,14 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       pbus1218 = nv_rd32(priv, 0x001218);
-       switch (pbus1218 & 0x00000300) {
-       case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
-       case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-       case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-       case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
-       }
-       priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
        priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv20_fb_vram_init;
        priv->base.tile.regions = 8;
        priv->base.tile.init = nv30_fb_tile_init;
-       priv->base.tile.fini = nv30_fb_tile_fini;
-       priv->base.tile.prog = nv10_fb_tile_prog;
-       return nouveau_fb_created(&priv->base);
+       priv->base.tile.comp = nv30_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv20_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
new file mode 100644 (file)
index 0000000..092f6f4
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv35_fb_priv {
+       struct nouveau_fb base;
+};
+
+static void
+nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+                 struct nouveau_fb_tile *tile)
+{
+       u32 tiles = DIV_ROUND_UP(size, 0x40);
+       u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+       if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+               if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
+               else           tile->zcomp |= 0x08000000; /* Z24S8 */
+               tile->zcomp |= ((tile->tag->offset           ) >> 6);
+               tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;
+#ifdef __BIG_ENDIAN
+               tile->zcomp |= 0x40000000;
+#endif
+       }
+}
+
+static int
+nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv35_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv20_fb_vram_init;
+       priv->base.tile.regions = 8;
+       priv->base.tile.init = nv30_fb_tile_init;
+       priv->base.tile.comp = nv35_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv20_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv35_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x35),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv35_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv30_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
new file mode 100644 (file)
index 0000000..797ab3b
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv36_fb_priv {
+       struct nouveau_fb base;
+};
+
+static void
+nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+                 struct nouveau_fb_tile *tile)
+{
+       u32 tiles = DIV_ROUND_UP(size, 0x40);
+       u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+       if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+               if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
+               else           tile->zcomp |= 0x20000000; /* Z24S8 */
+               tile->zcomp |= ((tile->tag->offset           ) >> 6);
+               tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;
+#ifdef __BIG_ENDIAN
+               tile->zcomp |= 0x80000000;
+#endif
+       }
+}
+
+static int
+nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv36_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv20_fb_vram_init;
+       priv->base.tile.regions = 8;
+       priv->base.tile.init = nv30_fb_tile_init;
+       priv->base.tile.comp = nv36_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv20_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv36_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x36),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv36_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv30_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
index 347a496..65e131b 100644 (file)
@@ -30,34 +30,37 @@ struct nv40_fb_priv {
        struct nouveau_fb base;
 };
 
-static inline int
-nv44_graph_class(struct nouveau_device *device)
-{
-       if ((device->chipset & 0xf0) == 0x60)
-               return 1;
-
-       return !(0x0baf & (1 << (device->chipset & 0x0f)));
-}
-
-static void
-nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static int
+nv40_fb_vram_init(struct nouveau_fb *pfb)
 {
-       nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
-       nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
-       nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-}
+       u32 pbus1218 = nv_rd32(pfb, 0x001218);
+       switch (pbus1218 & 0x00000300) {
+       case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+       case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+       case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+       case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+       }
 
-static void
-nv40_fb_init_gart(struct nv40_fb_priv *priv)
-{
-       nv_wr32(priv, 0x100800, 0x00000001);
+       pfb->ram.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
+       pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+       return nv_rd32(pfb, 0x100320);
 }
 
-static void
-nv44_fb_init_gart(struct nv40_fb_priv *priv)
+void
+nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+                 struct nouveau_fb_tile *tile)
 {
-       nv_wr32(priv, 0x100850, 0x80000000);
-       nv_wr32(priv, 0x100800, 0x00000001);
+       u32 tiles = DIV_ROUND_UP(size, 0x80);
+       u32 tags  = round_up(tiles / pfb->ram.parts, 0x100);
+       if ( (flags & 2) &&
+           !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+               tile->zcomp  = 0x28000000; /* Z24S8_SPLIT_GRAD */
+               tile->zcomp |= ((tile->tag->offset           ) >> 8);
+               tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
+#ifdef __BIG_ENDIAN
+               tile->zcomp |= 0x40000000;
+#endif
+       }
 }
 
 static int
@@ -70,19 +73,7 @@ nv40_fb_init(struct nouveau_object *object)
        if (ret)
                return ret;
 
-       switch (nv_device(priv)->chipset) {
-       case 0x40:
-       case 0x45:
-               nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
-               break;
-       default:
-               if (nv44_graph_class(nv_device(priv)))
-                       nv44_fb_init_gart(priv);
-               else
-                       nv40_fb_init_gart(priv);
-               break;
-       }
-
+       nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
        return 0;
 }
 
@@ -91,7 +82,6 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_oclass *oclass, void *data, u32 size,
             struct nouveau_object **pobject)
 {
-       struct nouveau_device *device = nv_device(parent);
        struct nv40_fb_priv *priv;
        int ret;
 
@@ -100,69 +90,14 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       /* 0x001218 is actually present on a few other NV4X I looked at,
-        * and even contains sane values matching 0x100474.  From looking
-        * at various vbios images however, this isn't the case everywhere.
-        * So, I chose to use the same regs I've seen NVIDIA reading around
-        * the memory detection, hopefully that'll get us the right numbers
-        */
-       if (device->chipset == 0x40) {
-               u32 pbus1218 = nv_rd32(priv, 0x001218);
-               switch (pbus1218 & 0x00000300) {
-               case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
-               case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-               case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-               case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
-               }
-       } else
-       if (device->chipset == 0x49 || device->chipset == 0x4b) {
-               u32 pfb914 = nv_rd32(priv, 0x100914);
-               switch (pfb914 & 0x00000003) {
-               case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-               case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
-               case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-               case 0x00000003: break;
-               }
-       } else
-       if (device->chipset != 0x4e) {
-               u32 pfb474 = nv_rd32(priv, 0x100474);
-               if (pfb474 & 0x00000004)
-                       priv->base.ram.type = NV_MEM_TYPE_GDDR3;
-               if (pfb474 & 0x00000002)
-                       priv->base.ram.type = NV_MEM_TYPE_DDR2;
-               if (pfb474 & 0x00000001)
-                       priv->base.ram.type = NV_MEM_TYPE_DDR1;
-       } else {
-               priv->base.ram.type = NV_MEM_TYPE_STOLEN;
-       }
-
-       priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
        priv->base.memtype_valid = nv04_fb_memtype_valid;
-       switch (device->chipset) {
-       case 0x40:
-       case 0x45:
-               priv->base.tile.regions = 8;
-               break;
-       case 0x46:
-       case 0x47:
-       case 0x49:
-       case 0x4b:
-       case 0x4c:
-               priv->base.tile.regions = 15;
-               break;
-       default:
-               priv->base.tile.regions = 12;
-               break;
-       }
+       priv->base.ram.init = nv40_fb_vram_init;
+       priv->base.tile.regions = 8;
        priv->base.tile.init = nv30_fb_tile_init;
-       priv->base.tile.fini = nv30_fb_tile_fini;
-       if (device->chipset == 0x40)
-               priv->base.tile.prog = nv10_fb_tile_prog;
-       else
-               priv->base.tile.prog = nv40_fb_tile_prog;
-
-       return nouveau_fb_created(&priv->base);
+       priv->base.tile.comp = nv40_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv20_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
 }
 
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
new file mode 100644 (file)
index 0000000..e9e5a08
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv41_fb_priv {
+       struct nouveau_fb base;
+};
+
+int
+nv41_fb_vram_init(struct nouveau_fb *pfb)
+{
+       u32 pfb474 = nv_rd32(pfb, 0x100474);
+       if (pfb474 & 0x00000004)
+               pfb->ram.type = NV_MEM_TYPE_GDDR3;
+       if (pfb474 & 0x00000002)
+               pfb->ram.type = NV_MEM_TYPE_DDR2;
+       if (pfb474 & 0x00000001)
+               pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+       pfb->ram.size =   nv_rd32(pfb, 0x10020c) & 0xff000000;
+       pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+       return nv_rd32(pfb, 0x100320);
+}
+
+void
+nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+       nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+       nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+       nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+       nv_rd32(pfb, 0x100600 + (i * 0x10));
+       nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+}
+
+int
+nv41_fb_init(struct nouveau_object *object)
+{
+       struct nv41_fb_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_fb_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x100800, 0x00000001);
+       return 0;
+}
+
+static int
+nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv41_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv41_fb_vram_init;
+       priv->base.tile.regions = 12;
+       priv->base.tile.init = nv30_fb_tile_init;
+       priv->base.tile.comp = nv40_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv41_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv41_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x41),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv41_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv41_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
new file mode 100644 (file)
index 0000000..ae89b50
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv44_fb_priv {
+       struct nouveau_fb base;
+};
+
+int
+nv44_fb_vram_init(struct nouveau_fb *pfb)
+{
+       u32 pfb474 = nv_rd32(pfb, 0x100474);
+       if (pfb474 & 0x00000004)
+               pfb->ram.type = NV_MEM_TYPE_GDDR3;
+       if (pfb474 & 0x00000002)
+               pfb->ram.type = NV_MEM_TYPE_DDR2;
+       if (pfb474 & 0x00000001)
+               pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+       pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+       return 0;
+}
+
+static void
+nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+                 u32 flags, struct nouveau_fb_tile *tile)
+{
+       tile->addr  = 0x00000001; /* mode = vram */
+       tile->addr |= addr;
+       tile->limit = max(1u, addr + size) - 1;
+       tile->pitch = pitch;
+}
+
+void
+nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+       nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+       nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+       nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+       nv_rd32(pfb, 0x100600 + (i * 0x10));
+}
+
+int
+nv44_fb_init(struct nouveau_object *object)
+{
+       struct nv44_fb_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_fb_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x100850, 0x80000000);
+       nv_wr32(priv, 0x100800, 0x00000001);
+       return 0;
+}
+
+static int
+nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv44_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv44_fb_vram_init;
+       priv->base.tile.regions = 12;
+       priv->base.tile.init = nv44_fb_tile_init;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv44_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv44_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x44),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv44_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv44_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
new file mode 100644 (file)
index 0000000..589b93e
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv46_fb_priv {
+       struct nouveau_fb base;
+};
+
+void
+nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+                 u32 flags, struct nouveau_fb_tile *tile)
+{
+       /* for performance, select alternate bank offset for zeta */
+       if (!(flags & 4)) tile->addr = (0 << 3);
+       else              tile->addr = (1 << 3);
+
+       tile->addr |= 0x00000001; /* mode = vram */
+       tile->addr |= addr;
+       tile->limit = max(1u, addr + size) - 1;
+       tile->pitch = pitch;
+}
+
+static int
+nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv46_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv44_fb_vram_init;
+       priv->base.tile.regions = 15;
+       priv->base.tile.init = nv46_fb_tile_init;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv44_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv46_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x46),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv46_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv44_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
new file mode 100644 (file)
index 0000000..818bba3
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv47_fb_priv {
+       struct nouveau_fb base;
+};
+
+static int
+nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv47_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv41_fb_vram_init;
+       priv->base.tile.regions = 15;
+       priv->base.tile.init = nv30_fb_tile_init;
+       priv->base.tile.comp = nv40_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv41_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv47_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x47),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv47_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv41_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
new file mode 100644 (file)
index 0000000..84a31af
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv49_fb_priv {
+       struct nouveau_fb base;
+};
+
+static int
+nv49_fb_vram_init(struct nouveau_fb *pfb)
+{
+       u32 pfb914 = nv_rd32(pfb, 0x100914);
+
+       switch (pfb914 & 0x00000003) {
+       case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+       case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+       case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+       case 0x00000003: break;
+       }
+
+       pfb->ram.size =   nv_rd32(pfb, 0x10020c) & 0xff000000;
+       pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+       return nv_rd32(pfb, 0x100320);
+}
+
+static int
+nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv49_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv49_fb_vram_init;
+       priv->base.tile.regions = 15;
+       priv->base.tile.init = nv30_fb_tile_init;
+       priv->base.tile.comp = nv40_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv41_fb_tile_prog;
+
+       return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv49_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x49),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv49_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv41_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
new file mode 100644 (file)
index 0000000..797fd55
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv4e_fb_priv {
+       struct nouveau_fb base;
+};
+
+static int
+nv4e_fb_vram_init(struct nouveau_fb *pfb)
+{
+       pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+       pfb->ram.type = NV_MEM_TYPE_STOLEN;
+       return 0;
+}
+
+static int
+nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv4e_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv4e_fb_vram_init;
+       priv->base.tile.regions = 12;
+       priv->base.tile.init = nv46_fb_tile_init;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv44_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv4e_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x4e),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv4e_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv44_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
index 5f57080..487cb8c 100644 (file)
@@ -51,6 +51,101 @@ nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
        return types[(memtype & 0xff00) >> 8] != 0;
 }
 
+static u32
+nv50_fb_vram_rblock(struct nouveau_fb *pfb)
+{
+       int i, parts, colbits, rowbitsa, rowbitsb, banks;
+       u64 rowsize, predicted;
+       u32 r0, r4, rt, ru, rblock_size;
+
+       r0 = nv_rd32(pfb, 0x100200);
+       r4 = nv_rd32(pfb, 0x100204);
+       rt = nv_rd32(pfb, 0x100250);
+       ru = nv_rd32(pfb, 0x001540);
+       nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+       for (i = 0, parts = 0; i < 8; i++) {
+               if (ru & (0x00010000 << i))
+                       parts++;
+       }
+
+       colbits  =  (r4 & 0x0000f000) >> 12;
+       rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+       rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+       banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);
+
+       rowsize = parts * banks * (1 << colbits) * 8;
+       predicted = rowsize << rowbitsa;
+       if (r0 & 0x00000004)
+               predicted += rowsize << rowbitsb;
+
+       if (predicted != pfb->ram.size) {
+               nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
+                       (u32)(pfb->ram.size >> 20));
+       }
+
+       rblock_size = rowsize;
+       if (rt & 1)
+               rblock_size *= 3;
+
+       nv_debug(pfb, "rblock %d bytes\n", rblock_size);
+       return rblock_size;
+}
+
+static int
+nv50_fb_vram_init(struct nouveau_fb *pfb)
+{
+       struct nouveau_device *device = nv_device(pfb);
+       struct nouveau_bios *bios = nouveau_bios(device);
+       const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+       const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+       u32 size;
+       int ret;
+
+       pfb->ram.size = nv_rd32(pfb, 0x10020c);
+       pfb->ram.size = (pfb->ram.size & 0xffffff00) |
+                      ((pfb->ram.size & 0x000000ff) << 32);
+
+       size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+       switch (device->chipset) {
+       case 0xaa:
+       case 0xac:
+       case 0xaf: /* IGPs, no reordering, no real VRAM */
+               ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
+               if (ret)
+                       return ret;
+
+               pfb->ram.type   = NV_MEM_TYPE_STOLEN;
+               pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+               break;
+       default:
+               switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
+               case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+               case 1:
+                       if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+                               pfb->ram.type = NV_MEM_TYPE_DDR3;
+                       else
+                               pfb->ram.type = NV_MEM_TYPE_DDR2;
+                       break;
+               case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+               case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break;
+               case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break;
+               default:
+                       break;
+               }
+
+               ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
+                                     nv50_fb_vram_rblock(pfb) >> 12);
+               if (ret)
+                       return ret;
+
+               pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+               break;
+       }
+
+       return nv_rd32(pfb, 0x100320);
+}
+
 static int
 nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
                 u32 memtype, struct nouveau_mem **pmem)
@@ -140,195 +235,6 @@ nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
        kfree(mem);
 }
 
-static u32
-nv50_vram_rblock(struct nv50_fb_priv *priv)
-{
-       int i, parts, colbits, rowbitsa, rowbitsb, banks;
-       u64 rowsize, predicted;
-       u32 r0, r4, rt, ru, rblock_size;
-
-       r0 = nv_rd32(priv, 0x100200);
-       r4 = nv_rd32(priv, 0x100204);
-       rt = nv_rd32(priv, 0x100250);
-       ru = nv_rd32(priv, 0x001540);
-       nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
-       for (i = 0, parts = 0; i < 8; i++) {
-               if (ru & (0x00010000 << i))
-                       parts++;
-       }
-
-       colbits  =  (r4 & 0x0000f000) >> 12;
-       rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
-       rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
-       banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);
-
-       rowsize = parts * banks * (1 << colbits) * 8;
-       predicted = rowsize << rowbitsa;
-       if (r0 & 0x00000004)
-               predicted += rowsize << rowbitsb;
-
-       if (predicted != priv->base.ram.size) {
-               nv_warn(priv, "memory controller reports %d MiB VRAM\n",
-                       (u32)(priv->base.ram.size >> 20));
-       }
-
-       rblock_size = rowsize;
-       if (rt & 1)
-               rblock_size *= 3;
-
-       nv_debug(priv, "rblock %d bytes\n", rblock_size);
-       return rblock_size;
-}
-
-static int
-nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-            struct nouveau_oclass *oclass, void *data, u32 size,
-            struct nouveau_object **pobject)
-{
-       struct nouveau_device *device = nv_device(parent);
-       struct nouveau_bios *bios = nouveau_bios(device);
-       const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-       const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-       struct nv50_fb_priv *priv;
-       u32 tags;
-       int ret;
-
-       ret = nouveau_fb_create(parent, engine, oclass, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       switch (nv_rd32(priv, 0x100714) & 0x00000007) {
-       case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-       case 1:
-               if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
-                       priv->base.ram.type = NV_MEM_TYPE_DDR3;
-               else
-                       priv->base.ram.type = NV_MEM_TYPE_DDR2;
-               break;
-       case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-       case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break;
-       case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break;
-       default:
-               break;
-       }
-
-       priv->base.ram.size = nv_rd32(priv, 0x10020c);
-       priv->base.ram.size = (priv->base.ram.size & 0xffffff00) |
-                            ((priv->base.ram.size & 0x000000ff) << 32);
-
-       tags = nv_rd32(priv, 0x100320);
-       ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
-       if (ret)
-               return ret;
-
-       nv_debug(priv, "%d compression tags\n", tags);
-
-       size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
-       switch (device->chipset) {
-       case 0xaa:
-       case 0xac:
-       case 0xaf: /* IGPs, no reordering, no real VRAM */
-               ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1);
-               if (ret)
-                       return ret;
-
-               priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
-               priv->base.ram.type = NV_MEM_TYPE_STOLEN;
-               break;
-       default:
-               ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
-                                     nv50_vram_rblock(priv) >> 12);
-               if (ret)
-                       return ret;
-
-               priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1;
-               break;
-       }
-
-       priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-       if (priv->r100c08_page) {
-               priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
-                                            0, PAGE_SIZE,
-                                            PCI_DMA_BIDIRECTIONAL);
-               if (pci_dma_mapping_error(device->pdev, priv->r100c08))
-                       nv_warn(priv, "failed 0x100c08 page map\n");
-       } else {
-               nv_warn(priv, "failed 0x100c08 page alloc\n");
-       }
-
-       priv->base.memtype_valid = nv50_fb_memtype_valid;
-       priv->base.ram.get = nv50_fb_vram_new;
-       priv->base.ram.put = nv50_fb_vram_del;
-       return nouveau_fb_created(&priv->base);
-}
-
-static void
-nv50_fb_dtor(struct nouveau_object *object)
-{
-       struct nouveau_device *device = nv_device(object);
-       struct nv50_fb_priv *priv = (void *)object;
-
-       if (priv->r100c08_page) {
-               pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
-                              PCI_DMA_BIDIRECTIONAL);
-               __free_page(priv->r100c08_page);
-       }
-
-       nouveau_fb_destroy(&priv->base);
-}
-
-static int
-nv50_fb_init(struct nouveau_object *object)
-{
-       struct nouveau_device *device = nv_device(object);
-       struct nv50_fb_priv *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_fb_init(&priv->base);
-       if (ret)
-               return ret;
-
-       /* Not a clue what this is exactly.  Without pointing it at a
-        * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
-        * cause IOMMU "read from address 0" errors (rh#561267)
-        */
-       nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
-
-       /* This is needed to get meaningful information from 100c90
-        * on traps. No idea what these values mean exactly. */
-       switch (device->chipset) {
-       case 0x50:
-               nv_wr32(priv, 0x100c90, 0x000707ff);
-               break;
-       case 0xa3:
-       case 0xa5:
-       case 0xa8:
-               nv_wr32(priv, 0x100c90, 0x000d0fff);
-               break;
-       case 0xaf:
-               nv_wr32(priv, 0x100c90, 0x089d1fff);
-               break;
-       default:
-               nv_wr32(priv, 0x100c90, 0x001d07ff);
-               break;
-       }
-
-       return 0;
-}
-
-struct nouveau_oclass
-nv50_fb_oclass = {
-       .handle = NV_SUBDEV(FB, 0x50),
-       .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv50_fb_ctor,
-               .dtor = nv50_fb_dtor,
-               .init = nv50_fb_init,
-               .fini = _nouveau_fb_fini,
-       },
-};
-
 static const struct nouveau_enum vm_dispatch_subclients[] = {
        { 0x00000000, "GRCTX", NULL },
        { 0x00000001, "NOTIFY", NULL },
@@ -424,11 +330,11 @@ static const struct nouveau_enum vm_fault[] = {
        {}
 };
 
-void
-nv50_fb_trap(struct nouveau_fb *pfb, int display)
+static void
+nv50_fb_intr(struct nouveau_subdev *subdev)
 {
-       struct nouveau_device *device = nv_device(pfb);
-       struct nv50_fb_priv *priv = (void *)pfb;
+       struct nouveau_device *device = nv_device(subdev);
+       struct nv50_fb_priv *priv = (void *)subdev;
        const struct nouveau_enum *en, *cl;
        u32 trap[6], idx, chan;
        u8 st0, st1, st2, st3;
@@ -445,9 +351,6 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
        }
        nv_wr32(priv, 0x100c90, idx | 0x80000000);
 
-       if (!display)
-               return;
-
        /* decode status bits into something more useful */
        if (device->chipset  < 0xa3 ||
            device->chipset == 0xaa || device->chipset == 0xac) {
@@ -494,3 +397,101 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
        else
                printk("0x%08x\n", st1);
 }
+
+static int
+nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nouveau_device *device = nv_device(parent);
+       struct nv50_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (priv->r100c08_page) {
+               priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
+                                            0, PAGE_SIZE,
+                                            PCI_DMA_BIDIRECTIONAL);
+               if (pci_dma_mapping_error(device->pdev, priv->r100c08))
+                       nv_warn(priv, "failed 0x100c08 page map\n");
+       } else {
+               nv_warn(priv, "failed 0x100c08 page alloc\n");
+       }
+
+       priv->base.memtype_valid = nv50_fb_memtype_valid;
+       priv->base.ram.init = nv50_fb_vram_init;
+       priv->base.ram.get = nv50_fb_vram_new;
+       priv->base.ram.put = nv50_fb_vram_del;
+       nv_subdev(priv)->intr = nv50_fb_intr;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+static void
+nv50_fb_dtor(struct nouveau_object *object)
+{
+       struct nouveau_device *device = nv_device(object);
+       struct nv50_fb_priv *priv = (void *)object;
+
+       if (priv->r100c08_page) {
+               pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
+                              PCI_DMA_BIDIRECTIONAL);
+               __free_page(priv->r100c08_page);
+       }
+
+       nouveau_fb_destroy(&priv->base);
+}
+
+static int
+nv50_fb_init(struct nouveau_object *object)
+{
+       struct nouveau_device *device = nv_device(object);
+       struct nv50_fb_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_fb_init(&priv->base);
+       if (ret)
+               return ret;
+
+       /* Not a clue what this is exactly.  Without pointing it at a
+        * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+        * cause IOMMU "read from address 0" errors (rh#561267)
+        */
+       nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+
+       /* This is needed to get meaningful information from 100c90
+        * on traps. No idea what these values mean exactly. */
+       switch (device->chipset) {
+       case 0x50:
+               nv_wr32(priv, 0x100c90, 0x000707ff);
+               break;
+       case 0xa3:
+       case 0xa5:
+       case 0xa8:
+               nv_wr32(priv, 0x100c90, 0x000d0fff);
+               break;
+       case 0xaf:
+               nv_wr32(priv, 0x100c90, 0x089d1fff);
+               break;
+       default:
+               nv_wr32(priv, 0x100c90, 0x001d07ff);
+               break;
+       }
+
+       return 0;
+}
+
+struct nouveau_oclass
+nv50_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x50),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv50_fb_ctor,
+               .dtor = nv50_fb_dtor,
+               .init = nv50_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
index 9f59f2b..306bdf1 100644 (file)
@@ -62,6 +62,65 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 }
 
 static int
+nvc0_fb_vram_init(struct nouveau_fb *pfb)
+{
+       struct nouveau_bios *bios = nouveau_bios(pfb);
+       const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+       const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+       u32 parts = nv_rd32(pfb, 0x022438);
+       u32 pmask = nv_rd32(pfb, 0x022554);
+       u32 bsize = nv_rd32(pfb, 0x10f20c);
+       u32 offset, length;
+       bool uniform = true;
+       int ret, part;
+
+       nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
+       nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+
+       pfb->ram.type = nouveau_fb_bios_memtype(bios);
+       pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
+
+       /* read amount of vram attached to each memory controller */
+       for (part = 0; part < parts; part++) {
+               if (!(pmask & (1 << part))) {
+                       u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
+                       if (psize != bsize) {
+                               if (psize < bsize)
+                                       bsize = psize;
+                               uniform = false;
+                       }
+
+                       nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
+                       pfb->ram.size += (u64)psize << 20;
+               }
+       }
+
+       /* if all controllers have the same amount attached, there's no holes */
+       if (uniform) {
+               offset = rsvd_head;
+               length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+               return nouveau_mm_init(&pfb->vram, offset, length, 1);
+       }
+
+       /* otherwise, address lowest common amount from 0GiB */
+       ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
+       if (ret)
+               return ret;
+
+       /* and the rest starting from (8GiB + common_size) */
+       offset = (0x0200000000ULL >> 12) + (bsize << 8);
+       length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail;
+
+       ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
+       if (ret) {
+               nouveau_mm_fini(&pfb->vram);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int
 nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
                 u32 memtype, struct nouveau_mem **pmem)
 {
@@ -139,66 +198,6 @@ nvc0_fb_dtor(struct nouveau_object *object)
 }
 
 static int
-nvc0_vram_detect(struct nvc0_fb_priv *priv)
-{
-       struct nouveau_bios *bios = nouveau_bios(priv);
-       struct nouveau_fb *pfb = &priv->base;
-       const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-       const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-       u32 parts = nv_rd32(priv, 0x022438);
-       u32 pmask = nv_rd32(priv, 0x022554);
-       u32 bsize = nv_rd32(priv, 0x10f20c);
-       u32 offset, length;
-       bool uniform = true;
-       int ret, part;
-
-       nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800));
-       nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
-       priv->base.ram.type = nouveau_fb_bios_memtype(bios);
-       priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 2 : 1;
-
-       /* read amount of vram attached to each memory controller */
-       for (part = 0; part < parts; part++) {
-               if (!(pmask & (1 << part))) {
-                       u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000));
-                       if (psize != bsize) {
-                               if (psize < bsize)
-                                       bsize = psize;
-                               uniform = false;
-                       }
-
-                       nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize);
-                       priv->base.ram.size += (u64)psize << 20;
-               }
-       }
-
-       /* if all controllers have the same amount attached, there's no holes */
-       if (uniform) {
-               offset = rsvd_head;
-               length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
-               return nouveau_mm_init(&pfb->vram, offset, length, 1);
-       }
-
-       /* otherwise, address lowest common amount from 0GiB */
-       ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
-       if (ret)
-               return ret;
-
-       /* and the rest starting from (8GiB + common_size) */
-       offset = (0x0200000000ULL >> 12) + (bsize << 8);
-       length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail;
-
-       ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
-       if (ret) {
-               nouveau_mm_fini(&pfb->vram);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int
 nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_oclass *oclass, void *data, u32 size,
             struct nouveau_object **pobject)
@@ -213,13 +212,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                return ret;
 
        priv->base.memtype_valid = nvc0_fb_memtype_valid;
+       priv->base.ram.init = nvc0_fb_vram_init;
        priv->base.ram.get = nvc0_fb_vram_new;
        priv->base.ram.put = nv50_fb_vram_del;
 
-       ret = nvc0_vram_detect(priv);
-       if (ret)
-               return ret;
-
        priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!priv->r100c10_page)
                return -ENOMEM;
@@ -229,7 +225,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (pci_dma_mapping_error(device->pdev, priv->r100c10))
                return -EFAULT;
 
-       return nouveau_fb_created(&priv->base);
+       return nouveau_fb_preinit(&priv->base);
 }
 
 
index fe1ebf1..dc27e79 100644 (file)
@@ -50,7 +50,7 @@ auxch_init(struct nouveau_i2c *aux, int ch)
                ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
                udelay(1);
                if (!timeout--) {
-                       AUX_ERR("begin idle timeout 0x%08x", ctrl);
+                       AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
                        return -EBUSY;
                }
        } while (ctrl & 0x03010000);
index ba4d28b..f5bbd38 100644 (file)
@@ -63,14 +63,14 @@ nv04_instobj_dtor(struct nouveau_object *object)
 }
 
 static u32
-nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
+nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
 {
        struct nv04_instobj_priv *node = (void *)object;
        return nv_ro32(object->engine, node->mem->offset + addr);
 }
 
 static void
-nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        struct nv04_instobj_priv *node = (void *)object;
        nv_wo32(object->engine, node->mem->offset + addr, data);
@@ -173,13 +173,13 @@ nv04_instmem_dtor(struct nouveau_object *object)
 }
 
 static u32
-nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
 {
        return nv_rd32(object, 0x700000 + addr);
 }
 
 static void
-nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        return nv_wr32(object, 0x700000 + addr, data);
 }
index 73c52eb..da64253 100644 (file)
@@ -111,14 +111,14 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 }
 
 static u32
-nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
 {
        struct nv04_instmem_priv *priv = (void *)object;
        return ioread32_native(priv->iomem + addr);
 }
 
 static void
-nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        struct nv04_instmem_priv *priv = (void *)object;
        iowrite32_native(data, priv->iomem + addr);
index 27ef089..cfc7e31 100644 (file)
@@ -76,7 +76,7 @@ nv50_instobj_dtor(struct nouveau_object *object)
 }
 
 static u32
-nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
+nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
 {
        struct nv50_instmem_priv *priv = (void *)object->engine;
        struct nv50_instobj_priv *node = (void *)object;
@@ -96,7 +96,7 @@ nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
 }
 
 static void
-nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
+nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
 {
        struct nv50_instmem_priv *priv = (void *)object->engine;
        struct nv50_instobj_priv *node = (void *)object;
index de5721c..8379aaf 100644 (file)
@@ -30,20 +30,20 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
        struct nouveau_mc *pmc = nouveau_mc(subdev);
        const struct nouveau_mc_intr *map = pmc->intr_map;
        struct nouveau_subdev *unit;
-       u32 stat;
+       u32 stat, intr;
 
-       stat = nv_rd32(pmc, 0x000100);
+       intr = stat = nv_rd32(pmc, 0x000100);
        while (stat && map->stat) {
                if (stat & map->stat) {
                        unit = nouveau_subdev(subdev, map->unit);
                        if (unit && unit->intr)
                                unit->intr(unit);
-                       stat &= ~map->stat;
+                       intr &= ~map->stat;
                }
                map++;
        }
 
-       if (stat) {
+       if (intr) {
                nv_error(pmc, "unknown intr 0x%08x\n", stat);
        }
 }
index cedf33b..8d759f8 100644 (file)
@@ -39,6 +39,7 @@ nv50_mc_intr[] = {
        { 0x00200000, NVDEV_SUBDEV_GPIO },
        { 0x04000000, NVDEV_ENGINE_DISP },
        { 0x80000000, NVDEV_ENGINE_SW },
+       { 0x0000d101, NVDEV_SUBDEV_FB },
        {},
 };
 
index a001e4c..ceb5c83 100644 (file)
@@ -40,6 +40,7 @@ nv98_mc_intr[] = {
        { 0x00400000, NVDEV_ENGINE_COPY0 },     /* NVA3-     */
        { 0x04000000, NVDEV_ENGINE_DISP },
        { 0x80000000, NVDEV_ENGINE_SW },
+       { 0x0040d101, NVDEV_SUBDEV_FB },
        {},
 };
 
index c2b81e3..9279668 100644 (file)
@@ -36,6 +36,7 @@ nvc0_mc_intr[] = {
        { 0x00000100, NVDEV_ENGINE_FIFO },
        { 0x00001000, NVDEV_ENGINE_GR },
        { 0x00008000, NVDEV_ENGINE_BSP },
+       { 0x00020000, NVDEV_ENGINE_VP },
        { 0x00100000, NVDEV_SUBDEV_TIMER },
        { 0x00200000, NVDEV_SUBDEV_GPIO },
        { 0x02000000, NVDEV_SUBDEV_LTCG },
index 49050d9..9474cfc 100644 (file)
@@ -67,7 +67,7 @@ nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
 static void
 nv41_vm_flush(struct nouveau_vm *vm)
 {
-       struct nv04_vm_priv *priv = (void *)vm->vmm;
+       struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
 
        mutex_lock(&nv_subdev(priv)->mutex);
        nv_wr32(priv, 0x100810, 0x00000022);
index cc79c79..4124192 100644 (file)
@@ -241,15 +241,31 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 
        if (unlikely(!abi16))
                return -ENOMEM;
-       client = nv_client(abi16->client);
 
-       if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
-               return nouveau_abi16_put(abi16, -EINVAL);
+       if (!drm->channel)
+               return nouveau_abi16_put(abi16, -ENODEV);
 
+       client = nv_client(abi16->client);
        device = nv_device(abi16->device);
        imem   = nouveau_instmem(device);
        pfb    = nouveau_fb(device);
 
+       /* hack to allow channel engine type specification on kepler */
+       if (device->card_type >= NV_E0) {
+               if (init->fb_ctxdma_handle != ~0)
+                       init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
+               else
+                       init->fb_ctxdma_handle = init->tt_ctxdma_handle;
+
+               /* allow flips to be executed if this is a graphics channel */
+               init->tt_ctxdma_handle = 0;
+               if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
+                       init->tt_ctxdma_handle = 1;
+       }
+
+       if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+               return nouveau_abi16_put(abi16, -EINVAL);
+
        /* allocate "abi16 channel" data and make up a handle for it */
        init->channel = ffsll(~abi16->handles);
        if (!init->channel--)
@@ -264,11 +280,6 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
        abi16->handles |= (1 << init->channel);
 
        /* create channel object and initialise dma and fence management */
-       if (device->card_type >= NV_E0) {
-               init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
-               init->tt_ctxdma_handle = 0;
-       }
-
        ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
                                  init->channel, init->fb_ctxdma_handle,
                                  init->tt_ctxdma_handle, &chan->chan);
@@ -378,7 +389,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
        struct nouveau_abi16_chan *chan, *temp;
        struct nouveau_abi16_ntfy *ntfy;
        struct nouveau_object *object;
-       struct nv_dma_class args;
+       struct nv_dma_class args = {};
        int ret;
 
        if (unlikely(!abi16))
index 48783e1..d97f200 100644 (file)
@@ -35,6 +35,14 @@ static struct nouveau_dsm_priv {
        acpi_handle rom_handle;
 } nouveau_dsm_priv;
 
+bool nouveau_is_optimus(void) {
+       return nouveau_dsm_priv.optimus_detected;
+}
+
+bool nouveau_is_v1_dsm(void) {
+       return nouveau_dsm_priv.dsm_detected;
+}
+
 #define NOUVEAU_DSM_HAS_MUX 0x1
 #define NOUVEAU_DSM_HAS_OPT 0x2
 
@@ -183,9 +191,7 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
 
 static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
 {
-       /* perhaps the _DSM functions are mutually exclusive, but prepare for
-        * the future */
-       if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+       if (!nouveau_dsm_priv.dsm_detected)
                return 0;
        if (id == VGA_SWITCHEROO_IGD)
                return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
@@ -201,7 +207,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
 
        /* Optimus laptops have the card already disabled in
         * nouveau_switcheroo_set_state */
-       if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+       if (!nouveau_dsm_priv.dsm_detected)
                return 0;
 
        return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
@@ -283,24 +289,24 @@ static bool nouveau_dsm_detect(void)
                        has_optimus = 1;
        }
 
-       if (vga_count == 2 && has_dsm && guid_valid) {
+       /* find the optimus DSM or the old v1 DSM */
+       if (has_optimus == 1) {
                acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
                        &buffer);
-               printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+               printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
                        acpi_method_name);
-               nouveau_dsm_priv.dsm_detected = true;
+               nouveau_dsm_priv.optimus_detected = true;
                ret = true;
-       }
-
-       if (has_optimus == 1) {
+       } else if (vga_count == 2 && has_dsm && guid_valid) {
                acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
                        &buffer);
-               printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
+               printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
                        acpi_method_name);
-               nouveau_dsm_priv.optimus_detected = true;
+               nouveau_dsm_priv.dsm_detected = true;
                ret = true;
        }
 
+
        return ret;
 }
 
index 08af677..d0da230 100644 (file)
@@ -4,6 +4,8 @@
 #define ROM_BIOS_PAGE 4096
 
 #if defined(CONFIG_ACPI)
+bool nouveau_is_optimus(void);
+bool nouveau_is_v1_dsm(void);
 void nouveau_register_dsm_handler(void);
 void nouveau_unregister_dsm_handler(void);
 void nouveau_switcheroo_optimus_dsm(void);
@@ -11,6 +13,8 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
 bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
 void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
 #else
+static inline bool nouveau_is_optimus(void) { return false; }
+static inline bool nouveau_is_v1_dsm(void) { return false; }
 static inline void nouveau_register_dsm_handler(void) {}
 static inline void nouveau_unregister_dsm_handler(void) {}
 static inline void nouveau_switcheroo_optimus_dsm(void) {}
index 09fdef2..865eddf 100644 (file)
@@ -624,206 +624,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
        return 0;
 }
 
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-bool
-bios_encoder_match(struct dcb_output *dcb, u32 hash)
-{
-       if ((hash & 0x000000f0) != (dcb->location << 4))
-               return false;
-       if ((hash & 0x0000000f) != dcb->type)
-               return false;
-       if (!(hash & (dcb->or << 16)))
-               return false;
-
-       switch (dcb->type) {
-       case DCB_OUTPUT_TMDS:
-       case DCB_OUTPUT_LVDS:
-       case DCB_OUTPUT_DP:
-               if (hash & 0x00c00000) {
-                       if (!(hash & (dcb->sorconf.link << 22)))
-                               return false;
-               }
-       default:
-               return true;
-       }
-}
-
-int
-nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
-                              struct dcb_output *dcbent, int crtc)
-{
-       /*
-        * The display script table is located by the BIT 'U' table.
-        *
-        * It contains an array of pointers to various tables describing
-        * a particular output type.  The first 32-bits of the output
-        * tables contains similar information to a DCB entry, and is
-        * used to decide whether that particular table is suitable for
-        * the output you want to access.
-        *
-        * The "record header length" field here seems to indicate the
-        * offset of the first configuration entry in the output tables.
-        * This is 10 on most cards I've seen, but 12 has been witnessed
-        * on DP cards, and there's another script pointer within the
-        * header.
-        *
-        * offset + 0   ( 8 bits): version
-        * offset + 1   ( 8 bits): header length
-        * offset + 2   ( 8 bits): record length
-        * offset + 3   ( 8 bits): number of records
-        * offset + 4   ( 8 bits): record header length
-        * offset + 5   (16 bits): pointer to first output script table
-        */
-
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvbios *bios = &drm->vbios;
-       uint8_t *table = &bios->data[bios->display.script_table_ptr];
-       uint8_t *otable = NULL;
-       uint16_t script;
-       int i;
-
-       if (!bios->display.script_table_ptr) {
-               NV_ERROR(drm, "No pointer to output script table\n");
-               return 1;
-       }
-
-       /*
-        * Nothing useful has been in any of the pre-2.0 tables I've seen,
-        * so until they are, we really don't need to care.
-        */
-       if (table[0] < 0x20)
-               return 1;
-
-       if (table[0] != 0x20 && table[0] != 0x21) {
-               NV_ERROR(drm, "Output script table version 0x%02x unknown\n",
-                        table[0]);
-               return 1;
-       }
-
-       /*
-        * The output script tables describing a particular output type
-        * look as follows:
-        *
-        * offset + 0   (32 bits): output this table matches (hash of DCB)
-        * offset + 4   ( 8 bits): unknown
-        * offset + 5   ( 8 bits): number of configurations
-        * offset + 6   (16 bits): pointer to some script
-        * offset + 8   (16 bits): pointer to some script
-        *
-        * headerlen == 10
-        * offset + 10           : configuration 0
-        *
-        * headerlen == 12
-        * offset + 10           : pointer to some script
-        * offset + 12           : configuration 0
-        *
-        * Each config entry is as follows:
-        *
-        * offset + 0   (16 bits): unknown, assumed to be a match value
-        * offset + 2   (16 bits): pointer to script table (clock set?)
-        * offset + 4   (16 bits): pointer to script table (reset?)
-        *
-        * There doesn't appear to be a count value to say how many
-        * entries exist in each script table, instead, a 0 value in
-        * the first 16-bit word seems to indicate both the end of the
-        * list and the default entry.  The second 16-bit word in the
-        * script tables is a pointer to the script to execute.
-        */
-
-       NV_DEBUG(drm, "Searching for output entry for %d %d %d\n",
-                       dcbent->type, dcbent->location, dcbent->or);
-       for (i = 0; i < table[3]; i++) {
-               otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
-               if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
-                       break;
-       }
-
-       if (!otable) {
-               NV_DEBUG(drm, "failed to match any output table\n");
-               return 1;
-       }
-
-       if (pclk < -2 || pclk > 0) {
-               /* Try to find matching script table entry */
-               for (i = 0; i < otable[5]; i++) {
-                       if (ROM16(otable[table[4] + i*6]) == type)
-                               break;
-               }
-
-               if (i == otable[5]) {
-                       NV_ERROR(drm, "Table 0x%04x not found for %d/%d, "
-                                     "using first\n",
-                                type, dcbent->type, dcbent->or);
-                       i = 0;
-               }
-       }
-
-       if (pclk == 0) {
-               script = ROM16(otable[6]);
-               if (!script) {
-                       NV_DEBUG(drm, "output script 0 not found\n");
-                       return 1;
-               }
-
-               NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script);
-               nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-       } else
-       if (pclk == -1) {
-               script = ROM16(otable[8]);
-               if (!script) {
-                       NV_DEBUG(drm, "output script 1 not found\n");
-                       return 1;
-               }
-
-               NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script);
-               nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-       } else
-       if (pclk == -2) {
-               if (table[4] >= 12)
-                       script = ROM16(otable[10]);
-               else
-                       script = 0;
-               if (!script) {
-                       NV_DEBUG(drm, "output script 2 not found\n");
-                       return 1;
-               }
-
-               NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script);
-               nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-       } else
-       if (pclk > 0) {
-               script = ROM16(otable[table[4] + i*6 + 2]);
-               if (script)
-                       script = clkcmptable(bios, script, pclk);
-               if (!script) {
-                       NV_DEBUG(drm, "clock script 0 not found\n");
-                       return 1;
-               }
-
-               NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script);
-               nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-       } else
-       if (pclk < 0) {
-               script = ROM16(otable[table[4] + i*6 + 4]);
-               if (script)
-                       script = clkcmptable(bios, script, -pclk);
-               if (!script) {
-                       NV_DEBUG(drm, "clock script 1 not found\n");
-                       return 1;
-               }
-
-               NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script);
-               nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-       }
-
-       return 0;
-}
-
-
 int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
 {
        /*
@@ -1212,31 +1012,6 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
        return 0;
 }
 
-static int
-parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
-                     struct bit_entry *bitentry)
-{
-       /*
-        * Parses the pointer to the G80 output script tables
-        *
-        * Starting at bitentry->offset:
-        *
-        * offset + 0  (16 bits): output script table pointer
-        */
-
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       uint16_t outputscripttableptr;
-
-       if (bitentry->length != 3) {
-               NV_ERROR(drm, "Do not understand BIT U table\n");
-               return -EINVAL;
-       }
-
-       outputscripttableptr = ROM16(bios->data[bitentry->offset]);
-       bios->display.script_table_ptr = outputscripttableptr;
-       return 0;
-}
-
 struct bit_table {
        const char id;
        int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -1313,7 +1088,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
        parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
        parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
        parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
-       parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
 
        return 0;
 }
@@ -2324,7 +2098,7 @@ nouveau_run_vbios_init(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvbios *bios = &drm->vbios;
-       int i, ret = 0;
+       int ret = 0;
 
        /* Reset the BIOS head to 0. */
        bios->state.crtchead = 0;
@@ -2337,13 +2111,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
                bios->fp.lvds_init_run = false;
        }
 
-       if (nv_device(drm->device)->card_type >= NV_50) {
-               for (i = 0; bios->execute && i < bios->dcb.entries; i++) {
-                       nouveau_bios_run_display_table(dev, 0, 0,
-                                                      &bios->dcb.entry[i], -1);
-               }
-       }
-
        return ret;
 }
 
index 3befbb8..f68c54c 100644 (file)
@@ -128,12 +128,6 @@ struct nvbios {
        } state;
 
        struct {
-               struct dcb_output *output;
-               int crtc;
-               uint16_t script_table_ptr;
-       } display;
-
-       struct {
                uint16_t fptablepointer;        /* also used by tmds */
                uint16_t fpxlatetableptr;
                int xlatwidth;
@@ -185,8 +179,6 @@ void nouveau_bios_takedown(struct drm_device *dev);
 int nouveau_run_vbios_init(struct drm_device *);
 struct dcb_connector_table_entry *
 nouveau_bios_connector_entry(struct drm_device *, int index);
-int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
-                                         struct dcb_output *, int crtc);
 bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
 uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
 int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -195,6 +187,5 @@ int run_tmds_table(struct drm_device *, struct dcb_output *,
                          int head, int pxclk);
 int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
                            enum LVDS_script, int pxclk);
-bool bios_encoder_match(struct dcb_output *, u32 hash);
 
 #endif
index 4c950b4..5614c89 100644 (file)
@@ -315,7 +315,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 
        nouveau_bo_placement_set(nvbo, memtype, 0);
 
-       ret = nouveau_bo_validate(nvbo, false, false, false);
+       ret = nouveau_bo_validate(nvbo, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
@@ -351,7 +351,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 
        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-       ret = nouveau_bo_validate(nvbo, false, false, false);
+       ret = nouveau_bo_validate(nvbo, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
@@ -392,12 +392,12 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
 
 int
 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
-                   bool no_wait_reserve, bool no_wait_gpu)
+                   bool no_wait_gpu)
 {
        int ret;
 
-       ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
-                             no_wait_reserve, no_wait_gpu);
+       ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
+                             interruptible, no_wait_gpu);
        if (ret)
                return ret;
 
@@ -556,8 +556,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
-                             bool no_wait_reserve, bool no_wait_gpu,
-                             struct ttm_mem_reg *new_mem)
+                             bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        struct nouveau_fence *fence = NULL;
        int ret;
@@ -567,7 +566,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                return ret;
 
        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
-                                       no_wait_reserve, no_wait_gpu, new_mem);
+                                       no_wait_gpu, new_mem);
        nouveau_fence_unref(&fence);
        return ret;
 }
@@ -965,8 +964,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-                    bool no_wait_reserve, bool no_wait_gpu,
-                    struct ttm_mem_reg *new_mem)
+                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan = chan = drm->channel;
@@ -995,7 +993,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
        ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
        if (ret == 0) {
                ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
-                                                   no_wait_reserve,
                                                    no_wait_gpu, new_mem);
        }
 
@@ -1064,8 +1061,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait_reserve, bool no_wait_gpu,
-                     struct ttm_mem_reg *new_mem)
+                     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
@@ -1078,7 +1074,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
        if (ret)
                return ret;
 
@@ -1086,11 +1082,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                goto out;
 
-       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
-       ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+       ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
 out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
@@ -1098,8 +1094,7 @@ out:
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait_reserve, bool no_wait_gpu,
-                     struct ttm_mem_reg *new_mem)
+                     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
@@ -1112,15 +1107,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
        if (ret)
                return ret;
 
-       ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+       ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
-       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
+       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
        if (ret)
                goto out;
 
@@ -1195,8 +1190,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-               bool no_wait_reserve, bool no_wait_gpu,
-               struct ttm_mem_reg *new_mem)
+               bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1220,23 +1214,26 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        /* CPU copy if we have no accelerated method available */
        if (!drm->ttm.move) {
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
                goto out;
        }
 
        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
-               ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+               ret = nouveau_bo_move_flipd(bo, evict, intr,
+                                           no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
-               ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+               ret = nouveau_bo_move_flips(bo, evict, intr,
+                                           no_wait_gpu, new_mem);
        else
-               ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+               ret = nouveau_bo_move_m2mf(bo, evict, intr,
+                                          no_wait_gpu, new_mem);
 
        if (!ret)
                goto out;
 
        /* Fallback to software copy. */
-       ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+       ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 
 out:
        if (nv_device(drm->device)->card_type < NV_50) {
@@ -1343,7 +1340,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = mappable;
        nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
-       return nouveau_bo_validate(nvbo, false, true, false);
+       return nouveau_bo_validate(nvbo, false, false);
 }
 
 static int
index dec51b1..25ca379 100644 (file)
@@ -76,7 +76,7 @@ u32  nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
 void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
 void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
 int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
-                        bool no_wait_reserve, bool no_wait_gpu);
+                        bool no_wait_gpu);
 
 struct nouveau_vma *
 nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
index c1d7301..174300b 100644 (file)
@@ -76,6 +76,8 @@ nouveau_channel_del(struct nouveau_channel **pchan)
                nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
                nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
                nouveau_bo_unmap(chan->push.buffer);
+               if (chan->push.buffer && chan->push.buffer->pin_refcnt)
+                       nouveau_bo_unpin(chan->push.buffer);
                nouveau_bo_ref(NULL, &chan->push.buffer);
                kfree(chan);
        }
@@ -267,7 +269,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
        struct nouveau_fb *pfb = nouveau_fb(device);
        struct nouveau_software_chan *swch;
        struct nouveau_object *object;
-       struct nv_dma_class args;
+       struct nv_dma_class args = {};
        int ret, i;
 
        /* allocate dma objects to cover all allowed vram, and gart */
@@ -346,7 +348,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
        /* allocate software object class (used for fences on <= nv05, and
         * to signal flip completion), bind it to a subchannel.
         */
-       if (chan != chan->drm->cechan) {
+       if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
                ret = nouveau_object_new(nv_object(client), chan->handle,
                                         NvSw, nouveau_abi16_swclass(chan->drm),
                                         NULL, 0, &object);
index 9a6e2cb..ac340ba 100644 (file)
@@ -110,7 +110,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
        dev  = nv_connector->base.dev;
        drm  = nouveau_drm(dev);
        gpio = nouveau_gpio(drm->device);
-       NV_DEBUG(drm, "\n");
 
        if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
                gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
@@ -221,7 +220,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
        }
 
        if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
-               drm_connector_property_set_value(connector,
+               drm_object_property_set_value(&connector->base,
                        dev->mode_config.dvi_i_subconnector_property,
                        nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
                        DRM_MODE_SUBCONNECTOR_DVID :
@@ -355,7 +354,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
         * valid - it's not (rh#613284)
         */
        if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
-               if (!(nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
+               if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
                        status = connector_status_connected;
                        goto out;
                }
@@ -929,8 +928,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
        int type, ret = 0;
        bool dummy;
 
-       NV_DEBUG(drm, "\n");
-
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                nv_connector = nouveau_connector(connector);
                if (nv_connector->index == index)
@@ -1043,7 +1040,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 
        /* Init DVI-I specific properties */
        if (nv_connector->type == DCB_CONNECTOR_DVI_I)
-               drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
+               drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
 
        /* Add overscan compensation options to digital outputs */
        if (disp->underscan_property &&
@@ -1051,31 +1048,31 @@ nouveau_connector_create(struct drm_device *dev, int index)
             type == DRM_MODE_CONNECTOR_DVII ||
             type == DRM_MODE_CONNECTOR_HDMIA ||
             type == DRM_MODE_CONNECTOR_DisplayPort)) {
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              disp->underscan_property,
                                              UNDERSCAN_OFF);
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              disp->underscan_hborder_property,
                                              0);
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              disp->underscan_vborder_property,
                                              0);
        }
 
        /* Add hue and saturation options */
        if (disp->vibrant_hue_property)
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              disp->vibrant_hue_property,
                                              90);
        if (disp->color_vibrance_property)
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              disp->color_vibrance_property,
                                              150);
 
        switch (nv_connector->type) {
        case DCB_CONNECTOR_VGA:
                if (nv_device(drm->device)->card_type >= NV_50) {
-                       drm_connector_attach_property(connector,
+                       drm_object_attach_property(&connector->base,
                                        dev->mode_config.scaling_mode_property,
                                        nv_connector->scaling_mode);
                }
@@ -1088,18 +1085,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
        default:
                nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                dev->mode_config.scaling_mode_property,
                                nv_connector->scaling_mode);
                if (disp->dithering_mode) {
                        nv_connector->dithering_mode = DITHERING_MODE_AUTO;
-                       drm_connector_attach_property(connector,
+                       drm_object_attach_property(&connector->base,
                                                disp->dithering_mode,
                                                nv_connector->dithering_mode);
                }
                if (disp->dithering_depth) {
                        nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
-                       drm_connector_attach_property(connector,
+                       drm_object_attach_property(&connector->base,
                                                disp->dithering_depth,
                                                nv_connector->dithering_depth);
                }
index ebdb876..20eb84c 100644 (file)
@@ -28,6 +28,7 @@
 #define __NOUVEAU_CONNECTOR_H__
 
 #include <drm/drm_edid.h>
+#include "nouveau_crtc.h"
 
 struct nouveau_i2c_port;
 
@@ -80,6 +81,21 @@ static inline struct nouveau_connector *nouveau_connector(
        return container_of(con, struct nouveau_connector, base);
 }
 
+static inline struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+{
+       struct drm_device *dev = nv_crtc->base.dev;
+       struct drm_connector *connector;
+       struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (connector->encoder && connector->encoder->crtc == crtc)
+                       return nouveau_connector(connector);
+       }
+
+       return NULL;
+}
+
 struct drm_connector *
 nouveau_connector_create(struct drm_device *, int index);
 
index e6d0d1e..d1e5890 100644 (file)
@@ -82,16 +82,6 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
        return &crtc->base;
 }
 
-int nv50_crtc_create(struct drm_device *dev, int index);
-int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
-                        uint32_t buffer_handle, uint32_t width,
-                        uint32_t height);
-int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
-
 int nv04_cursor_init(struct nouveau_crtc *);
-int nv50_cursor_init(struct nouveau_crtc *);
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
 
 #endif /* __NOUVEAU_CRTC_H__ */
index 86124b1..e4188f2 100644 (file)
@@ -98,12 +98,12 @@ nouveau_framebuffer_init(struct drm_device *dev,
                        nv_fb->r_dma = NvEvoVRAM_LP;
 
                switch (fb->depth) {
-               case  8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
-               case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
-               case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
+               case  8: nv_fb->r_format = 0x1e00; break;
+               case 15: nv_fb->r_format = 0xe900; break;
+               case 16: nv_fb->r_format = 0xe800; break;
                case 24:
-               case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
-               case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
+               case 32: nv_fb->r_format = 0xcf00; break;
+               case 30: nv_fb->r_format = 0xd100; break;
                default:
                         NV_ERROR(drm, "unknown depth %d\n", fb->depth);
                         return -EINVAL;
@@ -324,7 +324,7 @@ nouveau_display_create(struct drm_device *dev)
        disp->underscan_vborder_property =
                drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
 
-       if (gen == 1) {
+       if (gen >= 1) {
                disp->vibrant_hue_property =
                        drm_property_create(dev, DRM_MODE_PROP_RANGE,
                                            "vibrant hue", 2);
@@ -366,10 +366,7 @@ nouveau_display_create(struct drm_device *dev)
                if (nv_device(drm->device)->card_type < NV_50)
                        ret = nv04_display_create(dev);
                else
-               if (nv_device(drm->device)->card_type < NV_D0)
                        ret = nv50_display_create(dev);
-               else
-                       ret = nvd0_display_create(dev);
                if (ret)
                        goto disp_create_err;
 
@@ -400,11 +397,12 @@ nouveau_display_destroy(struct drm_device *dev)
        nouveau_backlight_exit(dev);
        drm_vblank_cleanup(dev);
 
+       drm_kms_helper_poll_fini(dev);
+       drm_mode_config_cleanup(dev);
+
        if (disp->dtor)
                disp->dtor(dev);
 
-       drm_kms_helper_poll_fini(dev);
-       drm_mode_config_cleanup(dev);
        nouveau_drm(dev)->display = NULL;
        kfree(disp);
 }
@@ -659,10 +657,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        /* Emit a page flip */
        if (nv_device(drm->device)->card_type >= NV_50) {
-               if (nv_device(drm->device)->card_type >= NV_D0)
-                       ret = nvd0_display_flip_next(crtc, fb, chan, 0);
-               else
-                       ret = nv50_display_flip_next(crtc, fb, chan);
+               ret = nv50_display_flip_next(crtc, fb, chan, 0);
                if (ret) {
                        mutex_unlock(&chan->cli->mutex);
                        goto fail_unreserve;
index 978a108..5983865 100644 (file)
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
 
+#include <core/class.h>
+
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 
-u8 *
-nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct bit_entry d;
-       u8 *table;
-       int i;
-
-       if (bit_table(dev, 'd', &d)) {
-               NV_ERROR(drm, "BIT 'd' table not found\n");
-               return NULL;
-       }
-
-       if (d.version != 1) {
-               NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version);
-               return NULL;
-       }
-
-       table = ROMPTR(dev, d.data[0]);
-       if (!table) {
-               NV_ERROR(drm, "displayport table pointer invalid\n");
-               return NULL;
-       }
-
-       switch (table[0]) {
-       case 0x20:
-       case 0x21:
-       case 0x30:
-       case 0x40:
-               break;
-       default:
-               NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]);
-               return NULL;
-       }
-
-       for (i = 0; i < table[3]; i++) {
-               *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
-               if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
-                       return table;
-       }
-
-       NV_ERROR(drm, "displayport encoder table not found\n");
-       return NULL;
-}
-
 /******************************************************************************
  * link training
  *****************************************************************************/
 struct dp_state {
        struct nouveau_i2c_port *auxch;
-       struct dp_train_func *func;
+       struct nouveau_object *core;
        struct dcb_output *dcb;
        int crtc;
        u8 *dpcd;
@@ -97,13 +54,20 @@ static void
 dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
+       struct dcb_output *dcb = dp->dcb;
+       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+       const u32 moff = (dp->crtc << 3) | (link << 2) | or;
        u8 sink[2];
+       u32 data;
 
        NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
 
        /* set desired link configuration on the source */
-       dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
-                          dp->dpcd[2] & DP_ENHANCED_FRAME_CAP);
+       data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
+       if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
+               data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
+
+       nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
 
        /* inform the sink of the new configuration */
        sink[0] = dp->link_bw / 27000;
@@ -118,11 +82,14 @@ static void
 dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
+       struct dcb_output *dcb = dp->dcb;
+       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+       const u32 moff = (dp->crtc << 3) | (link << 2) | or;
        u8 sink_tp;
 
        NV_DEBUG(drm, "training pattern %d\n", pattern);
 
-       dp->func->train_set(dev, dp->dcb, pattern);
+       nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
 
        nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
        sink_tp &= ~DP_TRAINING_PATTERN_MASK;
@@ -134,6 +101,9 @@ static int
 dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
+       struct dcb_output *dcb = dp->dcb;
+       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+       const u32 moff = (dp->crtc << 3) | (link << 2) | or;
        int i;
 
        for (i = 0; i < dp->link_nr; i++) {
@@ -148,7 +118,8 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
                        dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
                NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
-               dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
+
+               nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
        }
 
        return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
@@ -234,59 +205,32 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
 }
 
 static void
-dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable)
+dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
 {
-       u16 script = 0x0000;
-       u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
-       if (table) {
-               if (table[0] >= 0x20 && table[0] <= 0x30) {
-                       if (enable) script = ROM16(entry[12]);
-                       else        script = ROM16(entry[14]);
-               } else
-               if (table[0] == 0x40) {
-                       if (enable) script = ROM16(entry[11]);
-                       else        script = ROM16(entry[13]);
-               }
-       }
-
-       nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
-}
-
-static void
-dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
-{
-       u16 script = 0x0000;
-       u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
-       if (table) {
-               if (table[0] >= 0x20 && table[0] <= 0x30)
-                       script = ROM16(entry[6]);
-               else
-               if (table[0] == 0x40)
-                       script = ROM16(entry[5]);
-       }
-
-       nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+       struct dcb_output *dcb = dp->dcb;
+       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+       const u32 moff = (dp->crtc << 3) | (link << 2) | or;
+
+       nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
+                         NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
+                         NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
+                         NV94_DISP_SOR_DP_TRAIN_OP_INIT);
 }
 
 static void
 dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
 {
-       u16 script = 0x0000;
-       u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
-       if (table) {
-               if (table[0] >= 0x20 && table[0] <= 0x30)
-                       script = ROM16(entry[8]);
-               else
-               if (table[0] == 0x40)
-                       script = ROM16(entry[7]);
-       }
+       struct dcb_output *dcb = dp->dcb;
+       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+       const u32 moff = (dp->crtc << 3) | (link << 2) | or;
 
-       nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+       nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
+                         NV94_DISP_SOR_DP_TRAIN_OP_FINI);
 }
 
 static bool
 nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
-                     struct dp_train_func *func)
+                     struct nouveau_object *core)
 {
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -304,7 +248,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
        if (!dp.auxch)
                return false;
 
-       dp.func = func;
+       dp.core = core;
        dp.dcb = nv_encoder->dcb;
        dp.crtc = nv_crtc->index;
        dp.dpcd = nv_encoder->dp.dpcd;
@@ -318,11 +262,8 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
         */
        gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
 
-       /* enable down-spreading, if possible */
-       dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
-
-       /* execute pre-train script from vbios */
-       dp_link_train_init(dev, &dp);
+       /* enable down-spreading and execute pre-train script from vbios */
+       dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
 
        /* start off at highest link rate supported by encoder and display */
        while (*link_bw > nv_encoder->dp.link_bw)
@@ -365,7 +306,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
 
 void
 nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
-               struct dp_train_func *func)
+               struct nouveau_object *core)
 {
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_drm *drm = nouveau_drm(encoder->dev);
@@ -385,7 +326,7 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
        nv_wraux(auxch, DP_SET_POWER, &status, 1);
 
        if (mode == DRM_MODE_DPMS_ON)
-               nouveau_dp_link_train(encoder, datarate, func);
+               nouveau_dp_link_train(encoder, datarate, core);
 }
 
 static void
index 8244863..01c403d 100644 (file)
@@ -49,8 +49,6 @@
 #include "nouveau_fbcon.h"
 #include "nouveau_fence.h"
 
-#include "nouveau_ttm.h"
-
 MODULE_PARM_DESC(config, "option string to pass to driver core");
 static char *nouveau_config;
 module_param_named(config, nouveau_config, charp, 0400);
@@ -129,7 +127,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
 
        /* initialise synchronisation routines */
        if      (device->card_type < NV_10) ret = nv04_fence_create(drm);
-       else if (device->chipset   <  0x84) ret = nv10_fence_create(drm);
+       else if (device->card_type < NV_50) ret = nv10_fence_create(drm);
+       else if (device->chipset   <  0x84) ret = nv50_fence_create(drm);
        else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
        else                                ret = nvc0_fence_create(drm);
        if (ret) {
@@ -148,7 +147,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
                        NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 
                arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
-               arg1 = 0;
+               arg1 = 1;
        } else {
                arg0 = NvDmaFB;
                arg1 = NvDmaTT;
@@ -395,17 +394,12 @@ nouveau_drm_remove(struct pci_dev *pdev)
 }
 
 int
-nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+nouveau_do_suspend(struct drm_device *dev)
 {
-       struct drm_device *dev = pci_get_drvdata(pdev);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_cli *cli;
        int ret;
 
-       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
-           pm_state.event == PM_EVENT_PRETHAW)
-               return 0;
-
        if (dev->mode_config.num_crtc) {
                NV_INFO(drm, "suspending fbcon...\n");
                nouveau_fbcon_set_suspend(dev, 1);
@@ -436,13 +430,6 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
                goto fail_client;
 
        nouveau_agp_fini(drm);
-
-       pci_save_state(pdev);
-       if (pm_state.event == PM_EVENT_SUSPEND) {
-               pci_disable_device(pdev);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
-
        return 0;
 
 fail_client:
@@ -457,24 +444,33 @@ fail_client:
        return ret;
 }
 
-int
-nouveau_drm_resume(struct pci_dev *pdev)
+int nouveau_pmops_suspend(struct device *dev)
 {
-       struct drm_device *dev = pci_get_drvdata(pdev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_cli *cli;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
        int ret;
 
-       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       NV_INFO(drm, "re-enabling device...\n");
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-       ret = pci_enable_device(pdev);
+       ret = nouveau_do_suspend(drm_dev);
        if (ret)
                return ret;
-       pci_set_master(pdev);
+
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
+
+       return 0;
+}
+
+int
+nouveau_do_resume(struct drm_device *dev)
+{
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nouveau_cli *cli;
+
+       NV_INFO(drm, "re-enabling device...\n");
 
        nouveau_agp_reset(drm);
 
@@ -500,6 +496,42 @@ nouveau_drm_resume(struct pci_dev *pdev)
        return 0;
 }
 
+int nouveau_pmops_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       int ret;
+
+       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+               return 0;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       ret = pci_enable_device(pdev);
+       if (ret)
+               return ret;
+       pci_set_master(pdev);
+
+       return nouveau_do_resume(drm_dev);
+}
+
+static int nouveau_pmops_freeze(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+       return nouveau_do_suspend(drm_dev);
+}
+
+static int nouveau_pmops_thaw(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+       return nouveau_do_resume(drm_dev);
+}
+
+
 static int
 nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 {
@@ -652,14 +684,22 @@ nouveau_drm_pci_table[] = {
        {}
 };
 
+static const struct dev_pm_ops nouveau_pm_ops = {
+       .suspend = nouveau_pmops_suspend,
+       .resume = nouveau_pmops_resume,
+       .freeze = nouveau_pmops_freeze,
+       .thaw = nouveau_pmops_thaw,
+       .poweroff = nouveau_pmops_freeze,
+       .restore = nouveau_pmops_resume,
+};
+
 static struct pci_driver
 nouveau_drm_pci_driver = {
        .name = "nouveau",
        .id_table = nouveau_drm_pci_table,
        .probe = nouveau_drm_probe,
        .remove = nouveau_drm_remove,
-       .suspend = nouveau_drm_suspend,
-       .resume = nouveau_drm_resume,
+       .driver.pm = &nouveau_pm_ops,
 };
 
 static int __init
index a101699..aa89eb9 100644 (file)
@@ -129,8 +129,8 @@ nouveau_dev(struct drm_device *dev)
        return nv_device(nouveau_drm(dev)->device);
 }
 
-int nouveau_drm_suspend(struct pci_dev *, pm_message_t);
-int nouveau_drm_resume(struct pci_dev *);
+int nouveau_pmops_suspend(struct device *);
+int nouveau_pmops_resume(struct device *);
 
 #define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
 #define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
index 6a17bf2..d0d95bd 100644 (file)
@@ -93,14 +93,9 @@ get_slave_funcs(struct drm_encoder *enc)
 /* nouveau_dp.c */
 bool nouveau_dp_detect(struct drm_encoder *);
 void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
-                    struct dp_train_func *);
-u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **);
+                    struct nouveau_object *);
 
 struct nouveau_connector *
 nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
-int nv50_sor_create(struct drm_connector *, struct dcb_output *);
-void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
-int nv50_dac_create(struct drm_connector *, struct dcb_output *);
-
 
 #endif /* __NOUVEAU_ENCODER_H__ */
index 5e2f521..8bf695c 100644 (file)
@@ -433,7 +433,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                        return ret;
                }
 
-               ret = nouveau_bo_validate(nvbo, true, false, false);
+               ret = nouveau_bo_validate(nvbo, true, false);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_ERROR(drm, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
deleted file mode 100644 (file)
index 2c672ce..0000000
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-
-static bool
-hdmi_sor(struct drm_encoder *encoder)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       if (nv_device(drm->device)->chipset <  0xa3 ||
-           nv_device(drm->device)->chipset == 0xaa ||
-           nv_device(drm->device)->chipset == 0xac)
-               return false;
-       return true;
-}
-
-static inline u32
-hdmi_base(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
-       if (!hdmi_sor(encoder))
-               return 0x616500 + (nv_crtc->index * 0x800);
-       return 0x61c500 + (nv_encoder->or * 0x800);
-}
-
-static void
-hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
-{
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       nv_wr32(device, hdmi_base(encoder) + reg, val);
-}
-
-static u32
-hdmi_rd32(struct drm_encoder *encoder, u32 reg)
-{
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       return nv_rd32(device, hdmi_base(encoder) + reg);
-}
-
-static u32
-hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
-{
-       u32 tmp = hdmi_rd32(encoder, reg);
-       hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
-       return tmp;
-}
-
-static void
-nouveau_audio_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       u32 or = nv_encoder->or * 0x800;
-
-       if (hdmi_sor(encoder))
-               nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000);
-}
-
-static void
-nouveau_audio_mode_set(struct drm_encoder *encoder,
-                      struct drm_display_mode *mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       struct nouveau_connector *nv_connector;
-       u32 or = nv_encoder->or * 0x800;
-       int i;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       if (!drm_detect_monitor_audio(nv_connector->edid)) {
-               nouveau_audio_disconnect(encoder);
-               return;
-       }
-
-       if (hdmi_sor(encoder)) {
-               nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001);
-
-               drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
-               if (nv_connector->base.eld[0]) {
-                       u8 *eld = nv_connector->base.eld;
-                       for (i = 0; i < eld[2] * 4; i++)
-                               nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]);
-                       for (i = eld[2] * 4; i < 0x60; i++)
-                               nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00);
-                       nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002);
-               }
-       }
-}
-
-static void
-nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
-{
-       /* calculate checksum for the infoframe */
-       u8 sum = 0, i;
-       for (i = 0; i < frame[2]; i++)
-               sum += frame[i];
-       frame[3] = 256 - sum;
-
-       /* disable infoframe, and write header */
-       hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
-       hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
-
-       /* register scans tell me the audio infoframe has only one set of
-        * subpack regs, according to tegra (gee nvidia, it'd be nice if we
-        * could get those docs too!), the hdmi block pads out the rest of
-        * the packet on its own.
-        */
-       if (ctrl == 0x020)
-               frame[2] = 6;
-
-       /* write out checksum and data, weird weird 7 byte register pairs */
-       for (i = 0; i < frame[2] + 1; i += 7) {
-               u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
-               u32 *subpack = (u32 *)&frame[3 + i];
-               hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
-               hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
-       }
-
-       /* enable the infoframe */
-       hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
-}
-
-static void
-nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
-                            struct drm_display_mode *mode)
-{
-       const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
-       const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
-       const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
-       u8 frame[20];
-
-       frame[0x00] = 0x82; /* AVI infoframe */
-       frame[0x01] = 0x02; /* version */
-       frame[0x02] = 0x0d; /* length */
-       frame[0x03] = 0x00;
-       frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
-       frame[0x05] = (C << 6) | (M << 4) | R;
-       frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
-       frame[0x07] = VIC;
-       frame[0x08] = PR;
-       frame[0x09] = bar_top & 0xff;
-       frame[0x0a] = bar_top >> 8;
-       frame[0x0b] = bar_bottom & 0xff;
-       frame[0x0c] = bar_bottom >> 8;
-       frame[0x0d] = bar_left & 0xff;
-       frame[0x0e] = bar_left >> 8;
-       frame[0x0f] = bar_right & 0xff;
-       frame[0x10] = bar_right >> 8;
-       frame[0x11] = 0x00;
-       frame[0x12] = 0x00;
-       frame[0x13] = 0x00;
-
-       nouveau_hdmi_infoframe(encoder, 0x020, frame);
-}
-
-static void
-nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
-                            struct drm_display_mode *mode)
-{
-       const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
-       const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
-       u8 frame[12];
-
-       frame[0x00] = 0x84;     /* Audio infoframe */
-       frame[0x01] = 0x01;     /* version */
-       frame[0x02] = 0x0a;     /* length */
-       frame[0x03] = 0x00;
-       frame[0x04] = (CT << 4) | CC;
-       frame[0x05] = (SF << 2) | ceaSS;
-       frame[0x06] = FMT;
-       frame[0x07] = CA;
-       frame[0x08] = (DM_INH << 7) | (LSV << 3);
-       frame[0x09] = 0x00;
-       frame[0x0a] = 0x00;
-       frame[0x0b] = 0x00;
-
-       nouveau_hdmi_infoframe(encoder, 0x000, frame);
-}
-
-static void
-nouveau_hdmi_disconnect(struct drm_encoder *encoder)
-{
-       nouveau_audio_disconnect(encoder);
-
-       /* disable audio and avi infoframes */
-       hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
-       hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
-
-       /* disable hdmi */
-       hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
-}
-
-void
-nouveau_hdmi_mode_set(struct drm_encoder *encoder,
-                     struct drm_display_mode *mode)
-{
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_connector *nv_connector;
-       u32 max_ac_packet, rekey;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       if (!mode || !nv_connector || !nv_connector->edid ||
-           !drm_detect_hdmi_monitor(nv_connector->edid)) {
-               nouveau_hdmi_disconnect(encoder);
-               return;
-       }
-
-       nouveau_hdmi_video_infoframe(encoder, mode);
-       nouveau_hdmi_audio_infoframe(encoder, mode);
-
-       hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
-       hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
-       hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
-
-       nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
-       nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
-       nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
-
-       /* value matches nvidia binary driver, and tegra constant */
-       rekey = 56;
-
-       max_ac_packet  = mode->htotal - mode->hdisplay;
-       max_ac_packet -= rekey;
-       max_ac_packet -= 18; /* constant from tegra */
-       max_ac_packet /= 32;
-
-       /* enable hdmi */
-       hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
-                                             0x1f000000 | /* unknown */
-                                             max_ac_packet << 16 |
-                                             rekey);
-
-       nouveau_audio_mode_set(encoder, mode);
-}
index 1d8cb50..1303680 100644 (file)
@@ -60,18 +60,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
                return IRQ_NONE;
 
        nv_subdev(pmc)->intr(nv_subdev(pmc));
-
-       if (dev->mode_config.num_crtc) {
-               if (device->card_type >= NV_D0) {
-                       if (nv_rd32(device, 0x000100) & 0x04000000)
-                               nvd0_display_intr(dev);
-               } else
-               if (device->card_type >= NV_50) {
-                       if (nv_rd32(device, 0x000100) & 0x04000000)
-                               nv50_display_intr(dev);
-               }
-       }
-
        return IRQ_HANDLED;
 }
 
index 366462c..3543fec 100644 (file)
@@ -155,10 +155,6 @@ nouveau_prime_new(struct drm_device *dev,
                return ret;
        nvbo = *pnvbo;
 
-       /* we restrict allowed domains on nv50+ to only the types
-        * that were requested at creation time.  not possibly on
-        * earlier chips without busting the ABI.
-        */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
index 6f0ac64..25d3495 100644 (file)
@@ -31,12 +31,11 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
                             enum vga_switcheroo_state state)
 {
        struct drm_device *dev = pci_get_drvdata(pdev);
-       pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 
        if (state == VGA_SWITCHEROO_ON) {
                printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               nouveau_drm_resume(pdev);
+               nouveau_pmops_resume(&pdev->dev);
                drm_kms_helper_poll_enable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
@@ -44,7 +43,7 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                drm_kms_helper_poll_disable(dev);
                nouveau_switcheroo_optimus_dsm();
-               nouveau_drm_suspend(pdev, pmm);
+               nouveau_pmops_suspend(&pdev->dev);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
 }
index 82a0d9c..6578cd2 100644 (file)
@@ -730,6 +730,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
        drm_crtc_cleanup(crtc);
 
        nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+       nouveau_bo_unpin(nv_crtc->cursor.nvbo);
        nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
        kfree(nv_crtc);
 }
@@ -1056,8 +1057,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
                             0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
        if (!ret) {
                ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
-               if (!ret)
+               if (!ret) {
                        ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+                       if (ret)
+                               nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+               }
                if (ret)
                        nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
        }
index 846050f..2cd6fb8 100644 (file)
@@ -60,8 +60,6 @@ nv04_display_create(struct drm_device *dev)
        struct nv04_display *disp;
        int i, ret;
 
-       NV_DEBUG(drm, "\n");
-
        disp = kzalloc(sizeof(*disp), GFP_KERNEL);
        if (!disp)
                return -ENOMEM;
@@ -132,13 +130,10 @@ nv04_display_create(struct drm_device *dev)
 void
 nv04_display_destroy(struct drm_device *dev)
 {
-       struct nouveau_drm *drm = nouveau_drm(dev);
        struct nv04_display *disp = nv04_display(dev);
        struct drm_encoder *encoder;
        struct drm_crtc *crtc;
 
-       NV_DEBUG(drm, "\n");
-
        /* Turn every CRTC off. */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct drm_mode_set modeset = {
index ce752bf..7ae7f97 100644 (file)
@@ -155,6 +155,8 @@ nv10_fence_destroy(struct nouveau_drm *drm)
 {
        struct nv10_fence_priv *priv = drm->fence;
        nouveau_bo_unmap(priv->bo);
+       if (priv->bo)
+               nouveau_bo_unpin(priv->bo);
        nouveau_bo_ref(NULL, &priv->bo);
        drm->fence = NULL;
        kfree(priv);
@@ -183,8 +185,11 @@ nv10_fence_create(struct nouveau_drm *drm)
                                     0, 0x0000, NULL, &priv->bo);
                if (!ret) {
                        ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
-                       if (!ret)
+                       if (!ret) {
                                ret = nouveau_bo_map(priv->bo);
+                               if (ret)
+                                       nouveau_bo_unpin(priv->bo);
+                       }
                        if (ret)
                                nouveau_bo_ref(NULL, &priv->bo);
                }
index 897b636..2ca276a 100644 (file)
@@ -195,7 +195,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
                break;
        }
 
-       drm_connector_property_set_value(connector,
+       drm_object_property_set_value(&connector->base,
                                         conf->tv_subconnector_property,
                                         tv_enc->subconnector);
 
@@ -672,25 +672,25 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
 
        drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_select_subconnector_property,
                                        tv_enc->select_subconnector);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_subconnector_property,
                                        tv_enc->subconnector);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_mode_property,
                                        tv_enc->tv_norm);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_flicker_reduction_property,
                                        tv_enc->flicker);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_saturation_property,
                                        tv_enc->saturation);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_hue_property,
                                        tv_enc->hue);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_overscan_property,
                                        tv_enc->overscan);
 
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
deleted file mode 100644 (file)
index 222de77..0000000
+++ /dev/null
@@ -1,764 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_hw.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-#include <subdev/clock.h>
-
-static void
-nv50_crtc_lut_load(struct drm_crtc *crtc)
-{
-       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
-       int i;
-
-       NV_DEBUG(drm, "\n");
-
-       for (i = 0; i < 256; i++) {
-               writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
-               writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
-               writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
-       }
-
-       if (nv_crtc->lut.depth == 30) {
-               writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
-               writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
-               writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
-       }
-}
-
-int
-nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
-{
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int index = nv_crtc->index, ret;
-
-       NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-       NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked");
-
-       if (blanked) {
-               nv_crtc->cursor.hide(nv_crtc, false);
-
-               ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5);
-               if (ret) {
-                       NV_ERROR(drm, "no space while blanking crtc\n");
-                       return ret;
-               }
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
-               OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
-               OUT_RING(evo, 0);
-               if (nv_device(drm->device)->chipset != 0x50) {
-                       BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
-                       OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
-               }
-
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
-               OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
-       } else {
-               if (nv_crtc->cursor.visible)
-                       nv_crtc->cursor.show(nv_crtc, false);
-               else
-                       nv_crtc->cursor.hide(nv_crtc, false);
-
-               ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8);
-               if (ret) {
-                       NV_ERROR(drm, "no space while unblanking crtc\n");
-                       return ret;
-               }
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
-               OUT_RING(evo, nv_crtc->lut.depth == 8 ?
-                               NV50_EVO_CRTC_CLUT_MODE_OFF :
-                               NV50_EVO_CRTC_CLUT_MODE_ON);
-               OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
-               if (nv_device(drm->device)->chipset != 0x50) {
-                       BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
-                       OUT_RING(evo, NvEvoVRAM);
-               }
-
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
-               OUT_RING(evo, nv_crtc->fb.offset >> 8);
-               OUT_RING(evo, 0);
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
-               if (nv_device(drm->device)->chipset != 0x50)
-                       if (nv_crtc->fb.tile_flags == 0x7a00 ||
-                           nv_crtc->fb.tile_flags == 0xfe00)
-                               OUT_RING(evo, NvEvoFB32);
-                       else
-                       if (nv_crtc->fb.tile_flags == 0x7000)
-                               OUT_RING(evo, NvEvoFB16);
-                       else
-                               OUT_RING(evo, NvEvoVRAM_LP);
-               else
-                       OUT_RING(evo, NvEvoVRAM_LP);
-       }
-
-       nv_crtc->fb.blanked = blanked;
-       return 0;
-}
-
-static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
-       struct nouveau_connector *nv_connector;
-       struct drm_connector *connector;
-       int head = nv_crtc->index, ret;
-       u32 mode = 0x00;
-
-       nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       connector = &nv_connector->base;
-       if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
-               if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
-                       mode = DITHERING_MODE_DYNAMIC2X2;
-       } else {
-               mode = nv_connector->dithering_mode;
-       }
-
-       if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
-               if (connector->display_info.bpc >= 8)
-                       mode |= DITHERING_DEPTH_8BPC;
-       } else {
-               mode |= nv_connector->dithering_depth;
-       }
-
-       ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
-       if (ret == 0) {
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
-               OUT_RING  (evo, mode);
-               if (update) {
-                       BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-                       OUT_RING  (evo, 0);
-                       FIRE_RING (evo);
-               }
-       }
-
-       return ret;
-}
-
-static int
-nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int ret;
-       int adj;
-       u32 hue, vib;
-
-       NV_DEBUG(drm, "vibrance = %i, hue = %i\n",
-                    nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
-
-       ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
-       if (ret) {
-               NV_ERROR(drm, "no space while setting color vibrance\n");
-               return ret;
-       }
-
-       adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
-       vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
-
-       hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
-
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
-       OUT_RING  (evo, (hue << 20) | (vib << 8));
-
-       if (update) {
-               BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-               OUT_RING  (evo, 0);
-               FIRE_RING (evo);
-       }
-
-       return 0;
-}
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
-{
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct drm_connector *connector;
-       struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
-
-       /* The safest approach is to find an encoder with the right crtc, that
-        * is also linked to a connector. */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (connector->encoder)
-                       if (connector->encoder->crtc == crtc)
-                               return nouveau_connector(connector);
-       }
-
-       return NULL;
-}
-
-static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct nouveau_connector *nv_connector;
-       struct drm_crtc *crtc = &nv_crtc->base;
-       struct drm_device *dev = crtc->dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       struct drm_display_mode *umode = &crtc->mode;
-       struct drm_display_mode *omode;
-       int scaling_mode, ret;
-       u32 ctrl = 0, oX, oY;
-
-       NV_DEBUG(drm, "\n");
-
-       nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       if (!nv_connector || !nv_connector->native_mode) {
-               NV_ERROR(drm, "no native mode, forcing panel scaling\n");
-               scaling_mode = DRM_MODE_SCALE_NONE;
-       } else {
-               scaling_mode = nv_connector->scaling_mode;
-       }
-
-       /* start off at the resolution we programmed the crtc for, this
-        * effectively handles NONE/FULL scaling
-        */
-       if (scaling_mode != DRM_MODE_SCALE_NONE)
-               omode = nv_connector->native_mode;
-       else
-               omode = umode;
-
-       oX = omode->hdisplay;
-       oY = omode->vdisplay;
-       if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
-               oY *= 2;
-
-       /* add overscan compensation if necessary, will keep the aspect
-        * ratio the same as the backend mode unless overridden by the
-        * user setting both hborder and vborder properties.
-        */
-       if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
-                            (nv_connector->underscan == UNDERSCAN_AUTO &&
-                             nv_connector->edid &&
-                             drm_detect_hdmi_monitor(nv_connector->edid)))) {
-               u32 bX = nv_connector->underscan_hborder;
-               u32 bY = nv_connector->underscan_vborder;
-               u32 aspect = (oY << 19) / oX;
-
-               if (bX) {
-                       oX -= (bX * 2);
-                       if (bY) oY -= (bY * 2);
-                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
-               } else {
-                       oX -= (oX >> 4) + 32;
-                       if (bY) oY -= (bY * 2);
-                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
-               }
-       }
-
-       /* handle CENTER/ASPECT scaling, taking into account the areas
-        * removed already for overscan compensation
-        */
-       switch (scaling_mode) {
-       case DRM_MODE_SCALE_CENTER:
-               oX = min((u32)umode->hdisplay, oX);
-               oY = min((u32)umode->vdisplay, oY);
-               /* fall-through */
-       case DRM_MODE_SCALE_ASPECT:
-               if (oY < oX) {
-                       u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
-                       oX = ((oY * aspect) + (aspect / 2)) >> 19;
-               } else {
-                       u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
-                       oY = ((oX * aspect) + (aspect / 2)) >> 19;
-               }
-               break;
-       default:
-               break;
-       }
-
-       if (umode->hdisplay != oX || umode->vdisplay != oY ||
-           umode->flags & DRM_MODE_FLAG_INTERLACE ||
-           umode->flags & DRM_MODE_FLAG_DBLSCAN)
-               ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
-
-       ret = RING_SPACE(evo, 5);
-       if (ret)
-               return ret;
-
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
-       OUT_RING  (evo, ctrl);
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
-       OUT_RING  (evo, oY << 16 | oX);
-       OUT_RING  (evo, oY << 16 | oX);
-
-       if (update) {
-               nv50_display_flip_stop(crtc);
-               nv50_display_sync(dev);
-               nv50_display_flip_next(crtc, crtc->fb, NULL);
-       }
-
-       return 0;
-}
-
-int
-nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_clock *clk = nouveau_clock(device);
-
-       return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
-}
-
-static void
-nv50_crtc_destroy(struct drm_crtc *crtc)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-
-       NV_DEBUG(drm, "\n");
-
-       nouveau_bo_unmap(nv_crtc->lut.nvbo);
-       nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
-       nouveau_bo_unmap(nv_crtc->cursor.nvbo);
-       nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-       drm_crtc_cleanup(&nv_crtc->base);
-       kfree(nv_crtc);
-}
-
-int
-nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
-                    uint32_t buffer_handle, uint32_t width, uint32_t height)
-{
-       struct drm_device *dev = crtc->dev;
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct nouveau_bo *cursor = NULL;
-       struct drm_gem_object *gem;
-       int ret = 0, i;
-
-       if (!buffer_handle) {
-               nv_crtc->cursor.hide(nv_crtc, true);
-               return 0;
-       }
-
-       if (width != 64 || height != 64)
-               return -EINVAL;
-
-       gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
-       if (!gem)
-               return -ENOENT;
-       cursor = nouveau_gem_object(gem);
-
-       ret = nouveau_bo_map(cursor);
-       if (ret)
-               goto out;
-
-       /* The simple will do for now. */
-       for (i = 0; i < 64 * 64; i++)
-               nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
-
-       nouveau_bo_unmap(cursor);
-
-       nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
-       nv_crtc->cursor.show(nv_crtc, true);
-
-out:
-       drm_gem_object_unreference_unlocked(gem);
-       return ret;
-}
-
-int
-nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-       nv_crtc->cursor.set_pos(nv_crtc, x, y);
-       return 0;
-}
-
-static void
-nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-                   uint32_t start, uint32_t size)
-{
-       int end = (start + size > 256) ? 256 : start + size, i;
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-       for (i = start; i < end; i++) {
-               nv_crtc->lut.r[i] = r[i];
-               nv_crtc->lut.g[i] = g[i];
-               nv_crtc->lut.b[i] = b[i];
-       }
-
-       /* We need to know the depth before we upload, but it's possible to
-        * get called before a framebuffer is bound.  If this is the case,
-        * mark the lut values as dirty by setting depth==0, and it'll be
-        * uploaded on the first mode_set_base()
-        */
-       if (!nv_crtc->base.fb) {
-               nv_crtc->lut.depth = 0;
-               return;
-       }
-
-       nv50_crtc_lut_load(crtc);
-}
-
-static void
-nv50_crtc_save(struct drm_crtc *crtc)
-{
-       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-       NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_crtc_restore(struct drm_crtc *crtc)
-{
-       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-       NV_ERROR(drm, "!!\n");
-}
-
-static const struct drm_crtc_funcs nv50_crtc_funcs = {
-       .save = nv50_crtc_save,
-       .restore = nv50_crtc_restore,
-       .cursor_set = nv50_crtc_cursor_set,
-       .cursor_move = nv50_crtc_cursor_move,
-       .gamma_set = nv50_crtc_gamma_set,
-       .set_config = drm_crtc_helper_set_config,
-       .page_flip = nouveau_crtc_page_flip,
-       .destroy = nv50_crtc_destroy,
-};
-
-static void
-nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nv50_crtc_prepare(struct drm_crtc *crtc)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-
-       NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
-       nv50_display_flip_stop(crtc);
-       drm_vblank_pre_modeset(dev, nv_crtc->index);
-       nv50_crtc_blank(nv_crtc, true);
-}
-
-static void
-nv50_crtc_commit(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-       NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
-       nv50_crtc_blank(nv_crtc, false);
-       drm_vblank_post_modeset(dev, nv_crtc->index);
-       nv50_display_sync(dev);
-       nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
-                    struct drm_display_mode *adjusted_mode)
-{
-       return true;
-}
-
-static int
-nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
-                          struct drm_framebuffer *passed_fb,
-                          int x, int y, bool atomic)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       struct drm_framebuffer *drm_fb;
-       struct nouveau_framebuffer *fb;
-       int ret;
-
-       NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
-       /* no fb bound */
-       if (!atomic && !crtc->fb) {
-               NV_DEBUG(drm, "No FB bound\n");
-               return 0;
-       }
-
-       /* If atomic, we want to switch to the fb we were passed, so
-        * now we update pointers to do that.  (We don't pin; just
-        * assume we're already pinned and update the base address.)
-        */
-       if (atomic) {
-               drm_fb = passed_fb;
-               fb = nouveau_framebuffer(passed_fb);
-       } else {
-               drm_fb = crtc->fb;
-               fb = nouveau_framebuffer(crtc->fb);
-               /* If not atomic, we can go ahead and pin, and unpin the
-                * old fb we were passed.
-                */
-               ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
-               if (ret)
-                       return ret;
-
-               if (passed_fb) {
-                       struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
-                       nouveau_bo_unpin(ofb->nvbo);
-               }
-       }
-
-       nv_crtc->fb.offset = fb->nvbo->bo.offset;
-       nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
-       nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
-       if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) {
-               ret = RING_SPACE(evo, 2);
-               if (ret)
-                       return ret;
-
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
-               OUT_RING  (evo, fb->r_dma);
-       }
-
-       ret = RING_SPACE(evo, 12);
-       if (ret)
-               return ret;
-
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
-       OUT_RING  (evo, nv_crtc->fb.offset >> 8);
-       OUT_RING  (evo, 0);
-       OUT_RING  (evo, (drm_fb->height << 16) | drm_fb->width);
-       OUT_RING  (evo, fb->r_pitch);
-       OUT_RING  (evo, fb->r_format);
-
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
-       OUT_RING  (evo, fb->base.depth == 8 ?
-                  NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
-
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
-       OUT_RING  (evo, (y << 16) | x);
-
-       if (nv_crtc->lut.depth != fb->base.depth) {
-               nv_crtc->lut.depth = fb->base.depth;
-               nv50_crtc_lut_load(crtc);
-       }
-
-       return 0;
-}
-
-static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
-                  struct drm_display_mode *mode, int x, int y,
-                  struct drm_framebuffer *old_fb)
-{
-       struct drm_device *dev = crtc->dev;
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       u32 head = nv_crtc->index * 0x400;
-       u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
-       u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
-       u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
-       u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
-       u32 vblan2e = 0, vblan2s = 1;
-       int ret;
-
-       /* hw timing description looks like this:
-        *
-        * <sync> <back porch> <---------display---------> <front porch>
-        * ______
-        *       |____________|---------------------------|____________|
-        *
-        *       ^ synce      ^ blanke                    ^ blanks     ^ active
-        *
-        * interlaced modes also have 2 additional values pointing at the end
-        * and start of the next field's blanking period.
-        */
-
-       hactive = mode->htotal;
-       hsynce  = mode->hsync_end - mode->hsync_start - 1;
-       hbackp  = mode->htotal - mode->hsync_end;
-       hblanke = hsynce + hbackp;
-       hfrontp = mode->hsync_start - mode->hdisplay;
-       hblanks = mode->htotal - hfrontp - 1;
-
-       vactive = mode->vtotal * vscan / ilace;
-       vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
-       vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
-       vblanke = vsynce + vbackp;
-       vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
-       vblanks = vactive - vfrontp - 1;
-       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               vblan2e = vactive + vsynce + vbackp;
-               vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
-               vactive = (vactive * 2) + 1;
-       }
-
-       ret = RING_SPACE(evo, 18);
-       if (ret == 0) {
-               BEGIN_NV04(evo, 0, 0x0804 + head, 2);
-               OUT_RING  (evo, 0x00800000 | mode->clock);
-               OUT_RING  (evo, (ilace == 2) ? 2 : 0);
-               BEGIN_NV04(evo, 0, 0x0810 + head, 6);
-               OUT_RING  (evo, 0x00000000); /* border colour */
-               OUT_RING  (evo, (vactive << 16) | hactive);
-               OUT_RING  (evo, ( vsynce << 16) | hsynce);
-               OUT_RING  (evo, (vblanke << 16) | hblanke);
-               OUT_RING  (evo, (vblanks << 16) | hblanks);
-               OUT_RING  (evo, (vblan2e << 16) | vblan2s);
-               BEGIN_NV04(evo, 0, 0x082c + head, 1);
-               OUT_RING  (evo, 0x00000000);
-               BEGIN_NV04(evo, 0, 0x0900 + head, 1);
-               OUT_RING  (evo, 0x00000311); /* makes sync channel work */
-               BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
-               OUT_RING  (evo, (umode->vdisplay << 16) | umode->hdisplay);
-               BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
-               OUT_RING  (evo, 0x00000000); /* screen position */
-       }
-
-       nv_crtc->set_dither(nv_crtc, false);
-       nv_crtc->set_scale(nv_crtc, false);
-       nv_crtc->set_color_vibrance(nv_crtc, false);
-
-       return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
-}
-
-static int
-nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-                       struct drm_framebuffer *old_fb)
-{
-       int ret;
-
-       nv50_display_flip_stop(crtc);
-       ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
-       if (ret)
-               return ret;
-
-       ret = nv50_display_sync(crtc->dev);
-       if (ret)
-               return ret;
-
-       return nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static int
-nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              int x, int y, enum mode_set_atomic state)
-{
-       int ret;
-
-       nv50_display_flip_stop(crtc);
-       ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
-       if (ret)
-               return ret;
-
-       return nv50_display_sync(crtc->dev);
-}
-
-static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
-       .dpms = nv50_crtc_dpms,
-       .prepare = nv50_crtc_prepare,
-       .commit = nv50_crtc_commit,
-       .mode_fixup = nv50_crtc_mode_fixup,
-       .mode_set = nv50_crtc_mode_set,
-       .mode_set_base = nv50_crtc_mode_set_base,
-       .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
-       .load_lut = nv50_crtc_lut_load,
-};
-
-int
-nv50_crtc_create(struct drm_device *dev, int index)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_crtc *nv_crtc = NULL;
-       int ret, i;
-
-       NV_DEBUG(drm, "\n");
-
-       nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
-       if (!nv_crtc)
-               return -ENOMEM;
-
-       nv_crtc->index = index;
-       nv_crtc->set_dither = nv50_crtc_set_dither;
-       nv_crtc->set_scale = nv50_crtc_set_scale;
-       nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
-       nv_crtc->color_vibrance = 50;
-       nv_crtc->vibrant_hue = 0;
-       nv_crtc->lut.depth = 0;
-       for (i = 0; i < 256; i++) {
-               nv_crtc->lut.r[i] = i << 8;
-               nv_crtc->lut.g[i] = i << 8;
-               nv_crtc->lut.b[i] = i << 8;
-       }
-
-       drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
-       drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
-       drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
-
-       ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
-                            0, 0x0000, NULL, &nv_crtc->lut.nvbo);
-       if (!ret) {
-               ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
-               if (!ret)
-                       ret = nouveau_bo_map(nv_crtc->lut.nvbo);
-               if (ret)
-                       nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
-       }
-
-       if (ret)
-               goto out;
-
-
-       ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-                            0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
-       if (!ret) {
-               ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
-               if (!ret)
-                       ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
-               if (ret)
-                       nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-       }
-
-       if (ret)
-               goto out;
-
-       nv50_cursor_init(nv_crtc);
-out:
-       if (ret)
-               nv50_crtc_destroy(&nv_crtc->base);
-       return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
deleted file mode 100644 (file)
index 223da11..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-static void
-nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int ret;
-
-       NV_DEBUG(drm, "\n");
-
-       if (update && nv_crtc->cursor.visible)
-               return;
-
-       ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
-       if (ret) {
-               NV_ERROR(drm, "no space while unhiding cursor\n");
-               return;
-       }
-
-       if (nv_device(drm->device)->chipset != 0x50) {
-               BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
-               OUT_RING(evo, NvEvoVRAM);
-       }
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
-       OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
-       OUT_RING(evo, nv_crtc->cursor.offset >> 8);
-
-       if (update) {
-               BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-               OUT_RING(evo, 0);
-               FIRE_RING(evo);
-               nv_crtc->cursor.visible = true;
-       }
-}
-
-static void
-nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int ret;
-
-       NV_DEBUG(drm, "\n");
-
-       if (update && !nv_crtc->cursor.visible)
-               return;
-
-       ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
-       if (ret) {
-               NV_ERROR(drm, "no space while hiding cursor\n");
-               return;
-       }
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
-       OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
-       OUT_RING(evo, 0);
-       if (nv_device(drm->device)->chipset != 0x50) {
-               BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
-               OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
-       }
-
-       if (update) {
-               BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-               OUT_RING(evo, 0);
-               FIRE_RING(evo);
-               nv_crtc->cursor.visible = false;
-       }
-}
-
-static void
-nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-       struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev);
-
-       nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
-       nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
-               ((y & 0xFFFF) << 16) | (x & 0xFFFF));
-       /* Needed to make the cursor move. */
-       nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
-}
-
-static void
-nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-       if (offset == nv_crtc->cursor.offset)
-               return;
-
-       nv_crtc->cursor.offset = offset;
-       if (nv_crtc->cursor.visible) {
-               nv_crtc->cursor.visible = false;
-               nv_crtc->cursor.show(nv_crtc, true);
-       }
-}
-
-int
-nv50_cursor_init(struct nouveau_crtc *nv_crtc)
-{
-       nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
-       nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
-       nv_crtc->cursor.hide = nv50_cursor_hide;
-       nv_crtc->cursor.show = nv50_cursor_show;
-       return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
deleted file mode 100644 (file)
index 6a30a17..0000000
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static void
-nv50_dac_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int ret;
-
-       if (!nv_encoder->crtc)
-               return;
-       nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
-       NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or);
-
-       ret = RING_SPACE(evo, 4);
-       if (ret) {
-               NV_ERROR(drm, "no space while disconnecting DAC\n");
-               return;
-       }
-       BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
-       OUT_RING  (evo, 0);
-       BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-       OUT_RING  (evo, 0);
-
-       nv_encoder->crtc = NULL;
-}
-
-static enum drm_connector_status
-nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       enum drm_connector_status status = connector_status_disconnected;
-       uint32_t dpms_state, load_pattern, load_state;
-       int or = nv_encoder->or;
-
-       nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
-       dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
-
-       nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
-               0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-       if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
-                    NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
-               NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
-               NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
-                         nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
-               return status;
-       }
-
-       /* Use bios provided value if possible. */
-       if (drm->vbios.dactestval) {
-               load_pattern = drm->vbios.dactestval;
-               NV_DEBUG(drm, "Using bios provided load_pattern of %d\n",
-                         load_pattern);
-       } else {
-               load_pattern = 340;
-               NV_DEBUG(drm, "Using default load_pattern of %d\n",
-                        load_pattern);
-       }
-
-       nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
-               NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
-       mdelay(45); /* give it some time to process */
-       load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
-
-       nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
-       nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
-               NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-
-       if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
-                         NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
-               status = connector_status_connected;
-
-       if (status == connector_status_connected)
-               NV_DEBUG(drm, "Load was detected on output with or %d\n", or);
-       else
-               NV_DEBUG(drm, "Load was not detected on output with or %d\n", or);
-
-       return status;
-}
-
-static void
-nv50_dac_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       uint32_t val;
-       int or = nv_encoder->or;
-
-       NV_DEBUG(drm, "or %d mode %d\n", or, mode);
-
-       /* wait for it to be done */
-       if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
-                    NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
-               NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
-               NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
-                        nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
-               return;
-       }
-
-       val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
-
-       if (mode != DRM_MODE_DPMS_ON)
-               val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
-
-       switch (mode) {
-       case DRM_MODE_DPMS_STANDBY:
-               val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
-               break;
-       case DRM_MODE_DPMS_SUSPEND:
-               val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
-               break;
-       case DRM_MODE_DPMS_OFF:
-               val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
-               val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
-               val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
-               break;
-       default:
-               break;
-       }
-
-       nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
-               NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-}
-
-static void
-nv50_dac_save(struct drm_encoder *encoder)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_dac_restore(struct drm_encoder *encoder)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_dac_mode_fixup(struct drm_encoder *encoder,
-                   const struct drm_display_mode *mode,
-                   struct drm_display_mode *adjusted_mode)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_connector *connector;
-
-       NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
-       connector = nouveau_encoder_connector_get(nv_encoder);
-       if (!connector) {
-               NV_ERROR(drm, "Encoder has no connector\n");
-               return false;
-       }
-
-       if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
-            connector->native_mode)
-               drm_mode_copy(adjusted_mode, connector->native_mode);
-
-       return true;
-}
-
-static void
-nv50_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
-                 struct drm_display_mode *adjusted_mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
-       uint32_t mode_ctl = 0, mode_ctl2 = 0;
-       int ret;
-
-       NV_DEBUG(drm, "or %d type %d crtc %d\n",
-                    nv_encoder->or, nv_encoder->dcb->type, crtc->index);
-
-       nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
-       if (crtc->index == 1)
-               mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
-       else
-               mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
-
-       /* Lacking a working tv-out, this is not a 100% sure. */
-       if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
-               mode_ctl |= 0x40;
-       else
-       if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
-               mode_ctl |= 0x100;
-
-       if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-               mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
-
-       if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-               mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
-
-       ret = RING_SPACE(evo, 3);
-       if (ret) {
-               NV_ERROR(drm, "no space while connecting DAC\n");
-               return;
-       }
-       BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
-       OUT_RING(evo, mode_ctl);
-       OUT_RING(evo, mode_ctl2);
-
-       nv_encoder->crtc = encoder->crtc;
-}
-
-static struct drm_crtc *
-nv50_dac_crtc_get(struct drm_encoder *encoder)
-{
-       return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
-       .dpms = nv50_dac_dpms,
-       .save = nv50_dac_save,
-       .restore = nv50_dac_restore,
-       .mode_fixup = nv50_dac_mode_fixup,
-       .prepare = nv50_dac_disconnect,
-       .commit = nv50_dac_commit,
-       .mode_set = nv50_dac_mode_set,
-       .get_crtc = nv50_dac_crtc_get,
-       .detect = nv50_dac_detect,
-       .disable = nv50_dac_disconnect
-};
-
-static void
-nv50_dac_destroy(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
-       if (!encoder)
-               return;
-
-       NV_DEBUG(drm, "\n");
-
-       drm_encoder_cleanup(encoder);
-       kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
-       .destroy = nv50_dac_destroy,
-};
-
-int
-nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry)
-{
-       struct nouveau_encoder *nv_encoder;
-       struct drm_encoder *encoder;
-
-       nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
-       if (!nv_encoder)
-               return -ENOMEM;
-       encoder = to_drm_encoder(nv_encoder);
-
-       nv_encoder->dcb = entry;
-       nv_encoder->or = ffs(entry->or) - 1;
-
-       drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
-                        DRM_MODE_ENCODER_DAC);
-       drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
-
-       encoder->possible_crtcs = entry->heads;
-       encoder->possible_clones = 0;
-
-       drm_mode_connector_attach_encoder(connector, encoder);
-       return 0;
-}
-
index f97b42c..3587408 100644 (file)
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
+       /*
+ * Copyright 2011 Red Hat Inc.
  *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
  *
+ * Authors: Ben Skeggs
  */
 
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
-
-#include "nv50_display.h"
-#include "nouveau_crtc.h"
-#include "nouveau_encoder.h"
+#include "nouveau_gem.h"
 #include "nouveau_connector.h"
-#include "nouveau_fbcon.h"
-#include <drm/drm_crtc_helper.h>
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
 #include "nouveau_fence.h"
+#include "nv50_display.h"
 
+#include <core/client.h>
 #include <core/gpuobj.h>
-#include <subdev/timer.h>
-
-static void nv50_display_bh(unsigned long);
+#include <core/class.h>
 
-static inline int
-nv50_sor_nr(struct drm_device *dev)
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#define EVO_DMA_NR 9
+
+#define EVO_MASTER  (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY     EVO_SYNC(  0, 0x00)
+#define EVO_FLIP_SEM0(c)  EVO_SYNC((c), 0x00)
+#define EVO_FLIP_SEM1(c)  EVO_SYNC((c), 0x10)
+
+#define EVO_CORE_HANDLE      (0xd1500000)
+#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
+#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
+#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) |                               \
+                             (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
+
+/******************************************************************************
+ * EVO channel
+ *****************************************************************************/
+
+struct nv50_chan {
+       struct nouveau_object *user;
+       u32 handle;
+};
+
+static int
+nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head,
+                void *data, u32 size, struct nv50_chan *chan)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
+       struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+       const u32 oclass = EVO_CHAN_OCLASS(bclass, core);
+       const u32 handle = EVO_CHAN_HANDLE(bclass, head);
+       int ret;
 
-       if (device->chipset  < 0x90 ||
-           device->chipset == 0x92 ||
-           device->chipset == 0xa0)
-               return 2;
+       ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle,
+                                oclass, data, size, &chan->user);
+       if (ret)
+               return ret;
 
-       return 4;
+       chan->handle = handle;
+       return 0;
 }
 
-u32
-nv50_display_active_crtcs(struct drm_device *dev)
+static void
+nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
-       u32 mask = 0;
-       int i;
-
-       if (device->chipset  < 0x90 ||
-           device->chipset == 0x92 ||
-           device->chipset == 0xa0) {
-               for (i = 0; i < 2; i++)
-                       mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
-       } else {
-               for (i = 0; i < 4; i++)
-                       mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
-       }
+       struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+       if (chan->handle)
+               nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
+}
 
-       for (i = 0; i < 3; i++)
-               mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
+/******************************************************************************
+ * PIO EVO channel
+ *****************************************************************************/
 
-       return mask & 3;
-}
+struct nv50_pioc {
+       struct nv50_chan base;
+};
 
-int
-nv50_display_early_init(struct drm_device *dev)
+static void
+nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc)
 {
-       return 0;
+       nv50_chan_destroy(core, &pioc->base);
 }
 
-void
-nv50_display_late_takedown(struct drm_device *dev)
+static int
+nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head,
+                void *data, u32 size, struct nv50_pioc *pioc)
 {
+       return nv50_chan_create(core, bclass, head, data, size, &pioc->base);
 }
 
-int
-nv50_display_sync(struct drm_device *dev)
-{
-       struct nv50_display *disp = nv50_display(dev);
-       struct nouveau_channel *evo = disp->master;
-       int ret;
-
-       ret = RING_SPACE(evo, 6);
-       if (ret == 0) {
-               BEGIN_NV04(evo, 0, 0x0084, 1);
-               OUT_RING  (evo, 0x80000000);
-               BEGIN_NV04(evo, 0, 0x0080, 1);
-               OUT_RING  (evo, 0);
-               BEGIN_NV04(evo, 0, 0x0084, 1);
-               OUT_RING  (evo, 0x00000000);
+/******************************************************************************
+ * DMA EVO channel
+ *****************************************************************************/
 
-               nv_wo32(disp->ramin, 0x2000, 0x00000000);
-               FIRE_RING (evo);
+struct nv50_dmac {
+       struct nv50_chan base;
+       dma_addr_t handle;
+       u32 *ptr;
+};
 
-               if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000))
-                       return 0;
+static void
+nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac)
+{
+       if (dmac->ptr) {
+               struct pci_dev *pdev = nv_device(core)->pdev;
+               pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
        }
 
-       return 0;
+       nv50_chan_destroy(core, &dmac->base);
 }
 
-int
-nv50_display_init(struct drm_device *dev)
+static int
+nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
 {
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_channel *evo;
-       int ret, i;
-       u32 val;
-
-       NV_DEBUG(drm, "\n");
-
-       nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004));
-
-       /*
-        * I think the 0x006101XX range is some kind of main control area
-        * that enables things.
-        */
-       /* CRTC? */
-       for (i = 0; i < 2; i++) {
-               val = nv_rd32(device, 0x00616100 + (i * 0x800));
-               nv_wr32(device, 0x00610190 + (i * 0x10), val);
-               val = nv_rd32(device, 0x00616104 + (i * 0x800));
-               nv_wr32(device, 0x00610194 + (i * 0x10), val);
-               val = nv_rd32(device, 0x00616108 + (i * 0x800));
-               nv_wr32(device, 0x00610198 + (i * 0x10), val);
-               val = nv_rd32(device, 0x0061610c + (i * 0x800));
-               nv_wr32(device, 0x0061019c + (i * 0x10), val);
-       }
-
-       /* DAC */
-       for (i = 0; i < 3; i++) {
-               val = nv_rd32(device, 0x0061a000 + (i * 0x800));
-               nv_wr32(device, 0x006101d0 + (i * 0x04), val);
-       }
-
-       /* SOR */
-       for (i = 0; i < nv50_sor_nr(dev); i++) {
-               val = nv_rd32(device, 0x0061c000 + (i * 0x800));
-               nv_wr32(device, 0x006101e0 + (i * 0x04), val);
-       }
-
-       /* EXT */
-       for (i = 0; i < 3; i++) {
-               val = nv_rd32(device, 0x0061e000 + (i * 0x800));
-               nv_wr32(device, 0x006101f0 + (i * 0x04), val);
-       }
-
-       for (i = 0; i < 3; i++) {
-               nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
-                       NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-               nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
-       }
-
-       /* The precise purpose is unknown, i suspect it has something to do
-        * with text mode.
-        */
-       if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) {
-               nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100);
-               nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1);
-               if (!nv_wait(device, 0x006194e8, 2, 0)) {
-                       NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n");
-                       NV_ERROR(drm, "0x6194e8 = 0x%08x\n",
-                                               nv_rd32(device, 0x6194e8));
-                       return -EBUSY;
-               }
-       }
-
-       for (i = 0; i < 2; i++) {
-               nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
-               if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-                            NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
-                       NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
-                       NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
-                                nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
-                       return -EBUSY;
-               }
-
-               nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-                       NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
-               if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-                            NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
-                            NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
-                       NV_ERROR(drm, "timeout: "
-                                     "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
-                       NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
-                                nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
-                       return -EBUSY;
-               }
-       }
-
-       nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
-       nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
-       nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
-       nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
-       nv_wr32(device, NV50_PDISPLAY_INTR_EN_1,
-                    NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
-                    NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
-                    NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
-
-       ret = nv50_evo_init(dev);
+       struct nouveau_fb *pfb = nouveau_fb(core);
+       struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+       struct nouveau_object *object;
+       int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+                                    NV_DMA_IN_MEMORY_CLASS,
+                                    &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NV50_DMA_CONF0_ENABLE |
+                                                NV50_DMA_CONF0_PART_256,
+                                    }, sizeof(struct nv_dma_class), &object);
        if (ret)
                return ret;
-       evo = nv50_display(dev)->master;
-
-       nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
 
-       ret = RING_SPACE(evo, 3);
+       ret = nouveau_object_new(client, parent, NvEvoFB16,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
+                                                NV50_DMA_CONF0_PART_256,
+                                }, sizeof(struct nv_dma_class), &object);
        if (ret)
                return ret;
-       BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
-       OUT_RING  (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
-       OUT_RING  (evo, NvEvoSync);
 
-       return nv50_display_sync(dev);
+       ret = nouveau_object_new(client, parent, NvEvoFB32,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
+                                                NV50_DMA_CONF0_PART_256,
+                                }, sizeof(struct nv_dma_class), &object);
+       return ret;
 }
 
-void
-nv50_display_fini(struct drm_device *dev)
+static int
+nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
 {
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       struct nouveau_channel *evo = disp->master;
-       struct drm_crtc *drm_crtc;
-       int ret, i;
+       struct nouveau_fb *pfb = nouveau_fb(core);
+       struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+       struct nouveau_object *object;
+       int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+                                    NV_DMA_IN_MEMORY_CLASS,
+                                    &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NVC0_DMA_CONF0_ENABLE,
+                                    }, sizeof(struct nv_dma_class), &object);
+       if (ret)
+               return ret;
 
-       NV_DEBUG(drm, "\n");
+       ret = nouveau_object_new(client, parent, NvEvoFB16,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+                                }, sizeof(struct nv_dma_class), &object);
+       if (ret)
+               return ret;
 
-       list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
-               struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
+       ret = nouveau_object_new(client, parent, NvEvoFB32,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+                                }, sizeof(struct nv_dma_class), &object);
+       return ret;
+}
 
-               nv50_crtc_blank(crtc, true);
-       }
+static int
+nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+       struct nouveau_fb *pfb = nouveau_fb(core);
+       struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+       struct nouveau_object *object;
+       int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+                                    NV_DMA_IN_MEMORY_CLASS,
+                                    &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NVD0_DMA_CONF0_ENABLE |
+                                                NVD0_DMA_CONF0_PAGE_LP,
+                                    }, sizeof(struct nv_dma_class), &object);
+       if (ret)
+               return ret;
 
-       ret = RING_SPACE(evo, 2);
-       if (ret == 0) {
-               BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-               OUT_RING(evo, 0);
-       }
-       FIRE_RING(evo);
+       ret = nouveau_object_new(client, parent, NvEvoFB32,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
+                                                NVD0_DMA_CONF0_PAGE_LP,
+                                }, sizeof(struct nv_dma_class), &object);
+       return ret;
+}
 
-       /* Almost like ack'ing a vblank interrupt, maybe in the spirit of
-        * cleaning up?
-        */
-       list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
-               struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
-               uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
+static int
+nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
+                void *data, u32 size, u64 syncbuf,
+                struct nv50_dmac *dmac)
+{
+       struct nouveau_fb *pfb = nouveau_fb(core);
+       struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+       struct nouveau_object *object;
+       u32 pushbuf = *(u32 *)data;
+       int ret;
 
-               if (!crtc->base.enabled)
-                       continue;
+       dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
+                                       &dmac->handle);
+       if (!dmac->ptr)
+               return -ENOMEM;
 
-               nv_wr32(device, NV50_PDISPLAY_INTR_1, mask);
-               if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) {
-                       NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == "
-                                     "0x%08x\n", mask, mask);
-                       NV_ERROR(drm, "0x610024 = 0x%08x\n",
-                                nv_rd32(device, NV50_PDISPLAY_INTR_1));
-               }
-       }
+       ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf,
+                                NV_DMA_FROM_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_PCI_US |
+                                                NV_DMA_ACCESS_RD,
+                                       .start = dmac->handle + 0x0000,
+                                       .limit = dmac->handle + 0x0fff,
+                                }, sizeof(struct nv_dma_class), &object);
+       if (ret)
+               return ret;
 
-       for (i = 0; i < 2; i++) {
-               nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
-               if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-                            NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
-                       NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
-                       NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
-                                nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
-               }
-       }
+       ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base);
+       if (ret)
+               return ret;
 
-       nv50_evo_fini(dev);
+       ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = syncbuf + 0x0000,
+                                       .limit = syncbuf + 0x0fff,
+                                }, sizeof(struct nv_dma_class), &object);
+       if (ret)
+               return ret;
 
-       for (i = 0; i < 3; i++) {
-               if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i),
-                            NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
-                       NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
-                       NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
-                                 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
-               }
-       }
+       ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                }, sizeof(struct nv_dma_class), &object);
+       if (ret)
+               return ret;
 
-       /* disable interrupts. */
-       nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
+       if (nv_device(core)->card_type < NV_C0)
+               ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
+       else
+       if (nv_device(core)->card_type < NV_D0)
+               ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
+       else
+               ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
+       return ret;
 }
 
-int
-nv50_display_create(struct drm_device *dev)
+struct nv50_mast {
+       struct nv50_dmac base;
+};
+
+struct nv50_curs {
+       struct nv50_pioc base;
+};
+
+struct nv50_sync {
+       struct nv50_dmac base;
+       struct {
+               u32 offset;
+               u16 value;
+       } sem;
+};
+
+struct nv50_ovly {
+       struct nv50_dmac base;
+};
+
+struct nv50_oimm {
+       struct nv50_pioc base;
+};
+
+struct nv50_head {
+       struct nouveau_crtc base;
+       struct nv50_curs curs;
+       struct nv50_sync sync;
+       struct nv50_ovly ovly;
+       struct nv50_oimm oimm;
+};
+
+#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
+#define nv50_curs(c) (&nv50_head(c)->curs)
+#define nv50_sync(c) (&nv50_head(c)->sync)
+#define nv50_ovly(c) (&nv50_head(c)->ovly)
+#define nv50_oimm(c) (&nv50_head(c)->oimm)
+#define nv50_chan(c) (&(c)->base.base)
+#define nv50_vers(c) nv_mclass(nv50_chan(c)->user)
+
+struct nv50_disp {
+       struct nouveau_object *core;
+       struct nv50_mast mast;
+
+       u32 modeset;
+
+       struct nouveau_bo *sync;
+};
+
+static struct nv50_disp *
+nv50_disp(struct drm_device *dev)
 {
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct dcb_table *dcb = &drm->vbios.dcb;
-       struct drm_connector *connector, *ct;
-       struct nv50_display *priv;
-       int ret, i;
-
-       NV_DEBUG(drm, "\n");
+       return nouveau_display(dev)->priv;
+}
 
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
+#define nv50_mast(d) (&nv50_disp(d)->mast)
 
-       nouveau_display(dev)->priv = priv;
-       nouveau_display(dev)->dtor = nv50_display_destroy;
-       nouveau_display(dev)->init = nv50_display_init;
-       nouveau_display(dev)->fini = nv50_display_fini;
+static struct drm_crtc *
+nv50_display_crtc_get(struct drm_encoder *encoder)
+{
+       return nouveau_encoder(encoder)->crtc;
+}
 
-       /* Create CRTC objects */
-       for (i = 0; i < 2; i++) {
-               ret = nv50_crtc_create(dev, i);
-               if (ret)
-                       return ret;
-       }
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
+static u32 *
+evo_wait(void *evoc, int nr)
+{
+       struct nv50_dmac *dmac = evoc;
+       u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
 
-       /* We setup the encoders from the BIOS table */
-       for (i = 0 ; i < dcb->entries; i++) {
-               struct dcb_output *entry = &dcb->entry[i];
+       if (put + nr >= (PAGE_SIZE / 4) - 8) {
+               dmac->ptr[put] = 0x20000000;
 
-               if (entry->location != DCB_LOC_ON_CHIP) {
-                       NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n",
-                               entry->type, ffs(entry->or) - 1);
-                       continue;
+               nv_wo32(dmac->base.user, 0x0000, 0x00000000);
+               if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+                       NV_ERROR(dmac->base.user, "channel stalled\n");
+                       return NULL;
                }
 
-               connector = nouveau_connector_create(dev, entry->connector);
-               if (IS_ERR(connector))
-                       continue;
-
-               switch (entry->type) {
-               case DCB_OUTPUT_TMDS:
-               case DCB_OUTPUT_LVDS:
-               case DCB_OUTPUT_DP:
-                       nv50_sor_create(connector, entry);
-                       break;
-               case DCB_OUTPUT_ANALOG:
-                       nv50_dac_create(connector, entry);
-                       break;
-               default:
-                       NV_WARN(drm, "DCB encoder %d unknown\n", entry->type);
-                       continue;
-               }
+               put = 0;
        }
 
-       list_for_each_entry_safe(connector, ct,
-                                &dev->mode_config.connector_list, head) {
-               if (!connector->encoder_ids[0]) {
-                       NV_WARN(drm, "%s has no encoders, removing\n",
-                               drm_get_connector_name(connector));
-                       connector->funcs->destroy(connector);
-               }
-       }
+       return dmac->ptr + put;
+}
 
-       tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
+static void
+evo_kick(u32 *push, void *evoc)
+{
+       struct nv50_dmac *dmac = evoc;
+       nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+}
 
-       ret = nv50_evo_create(dev);
-       if (ret) {
-               nv50_display_destroy(dev);
-               return ret;
-       }
+#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
+#define evo_data(p,d)   *((p)++) = (d)
 
-       return 0;
+static bool
+evo_sync_wait(void *data)
+{
+       return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
 }
 
-void
-nv50_display_destroy(struct drm_device *dev)
+static int
+evo_sync(struct drm_device *dev)
 {
-       struct nv50_display *disp = nv50_display(dev);
+       struct nouveau_device *device = nouveau_dev(dev);
+       struct nv50_disp *disp = nv50_disp(dev);
+       struct nv50_mast *mast = nv50_mast(dev);
+       u32 *push = evo_wait(mast, 8);
+       if (push) {
+               nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+               evo_mthd(push, 0x0084, 1);
+               evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+               evo_mthd(push, 0x0080, 2);
+               evo_data(push, 0x00000000);
+               evo_data(push, 0x00000000);
+               evo_kick(push, mast);
+               if (nv_wait_cb(device, evo_sync_wait, disp->sync))
+                       return 0;
+       }
 
-       nv50_evo_destroy(dev);
-       kfree(disp);
+       return -EBUSY;
 }
 
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
 struct nouveau_bo *
 nv50_display_crtc_sema(struct drm_device *dev, int crtc)
 {
-       return nv50_display(dev)->crtc[crtc].sem.bo;
+       return nv50_disp(dev)->sync;
 }
 
 void
 nv50_display_flip_stop(struct drm_crtc *crtc)
 {
-       struct nv50_display *disp = nv50_display(crtc->dev);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
-       struct nouveau_channel *evo = dispc->sync;
-       int ret;
-
-       ret = RING_SPACE(evo, 8);
-       if (ret) {
-               WARN_ON(1);
-               return;
+       struct nv50_sync *sync = nv50_sync(crtc);
+       u32 *push;
+
+       push = evo_wait(sync, 8);
+       if (push) {
+               evo_mthd(push, 0x0084, 1);
+               evo_data(push, 0x00000000);
+               evo_mthd(push, 0x0094, 1);
+               evo_data(push, 0x00000000);
+               evo_mthd(push, 0x00c0, 1);
+               evo_data(push, 0x00000000);
+               evo_mthd(push, 0x0080, 1);
+               evo_data(push, 0x00000000);
+               evo_kick(push, sync);
        }
-
-       BEGIN_NV04(evo, 0, 0x0084, 1);
-       OUT_RING  (evo, 0x00000000);
-       BEGIN_NV04(evo, 0, 0x0094, 1);
-       OUT_RING  (evo, 0x00000000);
-       BEGIN_NV04(evo, 0, 0x00c0, 1);
-       OUT_RING  (evo, 0x00000000);
-       BEGIN_NV04(evo, 0, 0x0080, 1);
-       OUT_RING  (evo, 0x00000000);
-       FIRE_RING (evo);
 }
 
 int
 nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-                      struct nouveau_channel *chan)
+                      struct nouveau_channel *chan, u32 swap_interval)
 {
-       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
        struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
-       struct nv50_display *disp = nv50_display(crtc->dev);
+       struct nv50_disp *disp = nv50_disp(crtc->dev);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
-       struct nouveau_channel *evo = dispc->sync;
+       struct nv50_sync *sync = nv50_sync(crtc);
+       u32 *push;
        int ret;
 
-       ret = RING_SPACE(evo, chan ? 25 : 27);
-       if (unlikely(ret))
-               return ret;
+       swap_interval <<= 4;
+       if (swap_interval == 0)
+               swap_interval |= 0x100;
+
+       push = evo_wait(sync, 128);
+       if (unlikely(push == NULL))
+               return -EBUSY;
 
        /* synchronise with the rendering channel, if necessary */
        if (likely(chan)) {
                ret = RING_SPACE(chan, 10);
-               if (ret) {
-                       WIND_RING(evo);
+               if (ret)
                        return ret;
-               }
 
-               if (nv_device(drm->device)->chipset < 0xc0) {
-                       BEGIN_NV04(chan, 0, 0x0060, 2);
+               if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+                       BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
                        OUT_RING  (chan, NvEvoSema0 + nv_crtc->index);
-                       OUT_RING  (chan, dispc->sem.offset);
-                       BEGIN_NV04(chan, 0, 0x006c, 1);
-                       OUT_RING  (chan, 0xf00d0000 | dispc->sem.value);
-                       BEGIN_NV04(chan, 0, 0x0064, 2);
-                       OUT_RING  (chan, dispc->sem.offset ^ 0x10);
+                       OUT_RING  (chan, sync->sem.offset);
+                       BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+                       OUT_RING  (chan, 0xf00d0000 | sync->sem.value);
+                       BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+                       OUT_RING  (chan, sync->sem.offset ^ 0x10);
                        OUT_RING  (chan, 0x74b1e000);
-                       BEGIN_NV04(chan, 0, 0x0060, 1);
-                       if (nv_device(drm->device)->chipset < 0x84)
+                       BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+                       if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
                                OUT_RING  (chan, NvSema);
                        else
                                OUT_RING  (chan, chan->vram);
                } else {
                        u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
-                       offset += dispc->sem.offset;
-                       BEGIN_NVC0(chan, 0, 0x0010, 4);
+                       offset += sync->sem.offset;
+
+                       BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                        OUT_RING  (chan, upper_32_bits(offset));
                        OUT_RING  (chan, lower_32_bits(offset));
-                       OUT_RING  (chan, 0xf00d0000 | dispc->sem.value);
+                       OUT_RING  (chan, 0xf00d0000 | sync->sem.value);
                        OUT_RING  (chan, 0x1002);
-                       BEGIN_NVC0(chan, 0, 0x0010, 4);
+                       BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                        OUT_RING  (chan, upper_32_bits(offset));
                        OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
                        OUT_RING  (chan, 0x74b1e000);
                        OUT_RING  (chan, 0x1001);
                }
+
                FIRE_RING (chan);
        } else {
-               nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
-                               0xf00d0000 | dispc->sem.value);
+               nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
+                               0xf00d0000 | sync->sem.value);
+               evo_sync(crtc->dev);
        }
 
-       /* queue the flip on the crtc's "display sync" channel */
-       BEGIN_NV04(evo, 0, 0x0100, 1);
-       OUT_RING  (evo, 0xfffe0000);
-       if (chan) {
-               BEGIN_NV04(evo, 0, 0x0084, 1);
-               OUT_RING  (evo, 0x00000100);
+       /* queue the flip */
+       evo_mthd(push, 0x0100, 1);
+       evo_data(push, 0xfffe0000);
+       evo_mthd(push, 0x0084, 1);
+       evo_data(push, swap_interval);
+       if (!(swap_interval & 0x00000100)) {
+               evo_mthd(push, 0x00e0, 1);
+               evo_data(push, 0x40000000);
+       }
+       evo_mthd(push, 0x0088, 4);
+       evo_data(push, sync->sem.offset);
+       evo_data(push, 0xf00d0000 | sync->sem.value);
+       evo_data(push, 0x74b1e000);
+       evo_data(push, NvEvoSync);
+       evo_mthd(push, 0x00a0, 2);
+       evo_data(push, 0x00000000);
+       evo_data(push, 0x00000000);
+       evo_mthd(push, 0x00c0, 1);
+       evo_data(push, nv_fb->r_dma);
+       evo_mthd(push, 0x0110, 2);
+       evo_data(push, 0x00000000);
+       evo_data(push, 0x00000000);
+       if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) {
+               evo_mthd(push, 0x0800, 5);
+               evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+               evo_data(push, 0);
+               evo_data(push, (fb->height << 16) | fb->width);
+               evo_data(push, nv_fb->r_pitch);
+               evo_data(push, nv_fb->r_format);
        } else {
-               BEGIN_NV04(evo, 0, 0x0084, 1);
-               OUT_RING  (evo, 0x00000010);
-               /* allows gamma somehow, PDISP will bitch at you if
-                * you don't wait for vblank before changing this..
-                */
-               BEGIN_NV04(evo, 0, 0x00e0, 1);
-               OUT_RING  (evo, 0x40000000);
-       }
-       BEGIN_NV04(evo, 0, 0x0088, 4);
-       OUT_RING  (evo, dispc->sem.offset);
-       OUT_RING  (evo, 0xf00d0000 | dispc->sem.value);
-       OUT_RING  (evo, 0x74b1e000);
-       OUT_RING  (evo, NvEvoSync);
-       BEGIN_NV04(evo, 0, 0x00a0, 2);
-       OUT_RING  (evo, 0x00000000);
-       OUT_RING  (evo, 0x00000000);
-       BEGIN_NV04(evo, 0, 0x00c0, 1);
-       OUT_RING  (evo, nv_fb->r_dma);
-       BEGIN_NV04(evo, 0, 0x0110, 2);
-       OUT_RING  (evo, 0x00000000);
-       OUT_RING  (evo, 0x00000000);
-       BEGIN_NV04(evo, 0, 0x0800, 5);
-       OUT_RING  (evo, nv_fb->nvbo->bo.offset >> 8);
-       OUT_RING  (evo, 0);
-       OUT_RING  (evo, (fb->height << 16) | fb->width);
-       OUT_RING  (evo, nv_fb->r_pitch);
-       OUT_RING  (evo, nv_fb->r_format);
-       BEGIN_NV04(evo, 0, 0x0080, 1);
-       OUT_RING  (evo, 0x00000000);
-       FIRE_RING (evo);
-
-       dispc->sem.offset ^= 0x10;
-       dispc->sem.value++;
+               evo_mthd(push, 0x0400, 5);
+               evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+               evo_data(push, 0);
+               evo_data(push, (fb->height << 16) | fb->width);
+               evo_data(push, nv_fb->r_pitch);
+               evo_data(push, nv_fb->r_format);
+       }
+       evo_mthd(push, 0x0080, 1);
+       evo_data(push, 0x00000000);
+       evo_kick(push, sync);
+
+       sync->sem.offset ^= 0x10;
+       sync->sem.value++;
        return 0;
 }
 
-static u16
-nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
-                          u32 mc, int pxclk)
+/******************************************************************************
+ * CRTC
+ *****************************************************************************/
+static int
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 {
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_connector *nv_connector = NULL;
-       struct drm_encoder *encoder;
-       struct nvbios *bios = &drm->vbios;
-       u32 script = 0, or;
-
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
-               if (nv_encoder->dcb != dcb)
-                       continue;
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       struct nouveau_connector *nv_connector;
+       struct drm_connector *connector;
+       u32 *push, mode = 0x00;
+
+       nv_connector = nouveau_crtc_connector_get(nv_crtc);
+       connector = &nv_connector->base;
+       if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+               if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+                       mode = DITHERING_MODE_DYNAMIC2X2;
+       } else {
+               mode = nv_connector->dithering_mode;
+       }
 
-               nv_connector = nouveau_encoder_connector_get(nv_encoder);
-               break;
+       if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+               if (connector->display_info.bpc >= 8)
+                       mode |= DITHERING_DEPTH_8BPC;
+       } else {
+               mode |= nv_connector->dithering_depth;
        }
 
-       or = ffs(dcb->or) - 1;
-       switch (dcb->type) {
-       case DCB_OUTPUT_LVDS:
-               script = (mc >> 8) & 0xf;
-               if (bios->fp_no_ddc) {
-                       if (bios->fp.dual_link)
-                               script |= 0x0100;
-                       if (bios->fp.if_is_24bit)
-                               script |= 0x0200;
+       push = evo_wait(mast, 4);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
+                       evo_data(push, mode);
+               } else
+               if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
+                       evo_data(push, mode);
                } else {
-                       /* determine number of lvds links */
-                       if (nv_connector && nv_connector->edid &&
-                           nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
-                               /* http://www.spwg.org */
-                               if (((u8 *)nv_connector->edid)[121] == 2)
-                                       script |= 0x0100;
-                       } else
-                       if (pxclk >= bios->fp.duallink_transition_clk) {
-                               script |= 0x0100;
-                       }
-
-                       /* determine panel depth */
-                       if (script & 0x0100) {
-                               if (bios->fp.strapless_is_24bit & 2)
-                                       script |= 0x0200;
-                       } else {
-                               if (bios->fp.strapless_is_24bit & 1)
-                                       script |= 0x0200;
-                       }
+                       evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
+                       evo_data(push, mode);
+               }
 
-                       if (nv_connector && nv_connector->edid &&
-                           (nv_connector->edid->revision >= 4) &&
-                           (nv_connector->edid->input & 0x70) >= 0x20)
-                               script |= 0x0200;
+               if (update) {
+                       evo_mthd(push, 0x0080, 1);
+                       evo_data(push, 0x00000000);
                }
-               break;
-       case DCB_OUTPUT_TMDS:
-               script = (mc >> 8) & 0xf;
-               if (pxclk >= 165000)
-                       script |= 0x0100;
-               break;
-       case DCB_OUTPUT_DP:
-               script = (mc >> 8) & 0xf;
-               break;
-       case DCB_OUTPUT_ANALOG:
-               script = 0xff;
-               break;
-       default:
-               NV_ERROR(drm, "modeset on unsupported output type!\n");
-               break;
+               evo_kick(push, mast);
        }
 
-       return script;
+       return 0;
 }
 
-static void
-nv50_display_unk10_handler(struct drm_device *dev)
+static int
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       u32 unk30 = nv_rd32(device, 0x610030), mc;
-       int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
-
-       NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
-       disp->irq.dcb = NULL;
-
-       nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8);
-
-       /* Determine which CRTC we're dealing with, only 1 ever will be
-        * signalled at the same time with the current nouveau code.
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
+       struct drm_crtc *crtc = &nv_crtc->base;
+       struct nouveau_connector *nv_connector;
+       int mode = DRM_MODE_SCALE_NONE;
+       u32 oX, oY, *push;
+
+       /* start off at the resolution we programmed the crtc for, this
+        * effectively handles NONE/FULL scaling
         */
-       crtc = ffs((unk30 & 0x00000060) >> 5) - 1;
-       if (crtc < 0)
-               goto ack;
-
-       /* Nothing needs to be done for the encoder */
-       crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
-       if (crtc < 0)
-               goto ack;
-
-       /* Find which encoder was connected to the CRTC */
-       for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
-               mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
-               NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
-               if (!(mc & (1 << crtc)))
-                       continue;
-
-               switch ((mc & 0x00000f00) >> 8) {
-               case 0: type = DCB_OUTPUT_ANALOG; break;
-               case 1: type = DCB_OUTPUT_TV; break;
-               default:
-                       NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
-                       goto ack;
+       nv_connector = nouveau_crtc_connector_get(nv_crtc);
+       if (nv_connector && nv_connector->native_mode)
+               mode = nv_connector->scaling_mode;
+
+       if (mode != DRM_MODE_SCALE_NONE)
+               omode = nv_connector->native_mode;
+       else
+               omode = umode;
+
+       oX = omode->hdisplay;
+       oY = omode->vdisplay;
+       if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+               oY *= 2;
+
+       /* add overscan compensation if necessary, will keep the aspect
+        * ratio the same as the backend mode unless overridden by the
+        * user setting both hborder and vborder properties.
+        */
+       if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+                            (nv_connector->underscan == UNDERSCAN_AUTO &&
+                             nv_connector->edid &&
+                             drm_detect_hdmi_monitor(nv_connector->edid)))) {
+               u32 bX = nv_connector->underscan_hborder;
+               u32 bY = nv_connector->underscan_vborder;
+               u32 aspect = (oY << 19) / oX;
+
+               if (bX) {
+                       oX -= (bX * 2);
+                       if (bY) oY -= (bY * 2);
+                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+               } else {
+                       oX -= (oX >> 4) + 32;
+                       if (bY) oY -= (bY * 2);
+                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
                }
-
-               or = i;
        }
 
-       for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
-               if (nv_device(drm->device)->chipset  < 0x90 ||
-                   nv_device(drm->device)->chipset == 0x92 ||
-                   nv_device(drm->device)->chipset == 0xa0)
-                       mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
-               else
-                       mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
-
-               NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
-               if (!(mc & (1 << crtc)))
-                       continue;
-
-               switch ((mc & 0x00000f00) >> 8) {
-               case 0: type = DCB_OUTPUT_LVDS; break;
-               case 1: type = DCB_OUTPUT_TMDS; break;
-               case 2: type = DCB_OUTPUT_TMDS; break;
-               case 5: type = DCB_OUTPUT_TMDS; break;
-               case 8: type = DCB_OUTPUT_DP; break;
-               case 9: type = DCB_OUTPUT_DP; break;
-               default:
-                       NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
-                       goto ack;
+       /* handle CENTER/ASPECT scaling, taking into account the areas
+        * removed already for overscan compensation
+        */
+       switch (mode) {
+       case DRM_MODE_SCALE_CENTER:
+               oX = min((u32)umode->hdisplay, oX);
+               oY = min((u32)umode->vdisplay, oY);
+               /* fall-through */
+       case DRM_MODE_SCALE_ASPECT:
+               if (oY < oX) {
+                       u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+                       oX = ((oY * aspect) + (aspect / 2)) >> 19;
+               } else {
+                       u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+                       oY = ((oX * aspect) + (aspect / 2)) >> 19;
                }
-
-               or = i;
+               break;
+       default:
+               break;
        }
 
-       /* There was no encoder to disable */
-       if (type == DCB_OUTPUT_ANY)
-               goto ack;
+       push = evo_wait(mast, 8);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       /*XXX: SCALE_CTRL_ACTIVE??? */
+                       evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, (oY << 16) | oX);
+                       evo_data(push, (oY << 16) | oX);
+                       evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+               } else {
+                       evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
+                       evo_data(push, (oY << 16) | oX);
+                       evo_data(push, (oY << 16) | oX);
+                       evo_data(push, (oY << 16) | oX);
+                       evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+               }
 
-       /* Disable the encoder */
-       for (i = 0; i < drm->vbios.dcb.entries; i++) {
-               struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
+               evo_kick(push, mast);
 
-               if (dcb->type == type && (dcb->or & (1 << or))) {
-                       nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
-                       disp->irq.dcb = dcb;
-                       goto ack;
+               if (update) {
+                       nv50_display_flip_stop(crtc);
+                       nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
                }
        }
 
-       NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
-ack:
-       nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
-       nv_wr32(device, 0x610030, 0x80000000);
+       return 0;
 }
 
-static void
-nv50_display_unk20_handler(struct drm_device *dev)
+/* Program the per-head colour-vibrance/hue EVO method.
+ * nv_crtc->color_vibrance and ->vibrant_hue are scaled by 2047/100 into a
+ * 12-bit hardware field (& 0xfff); `adj` rounds positive vibrance values.
+ * When `update` is set, an EVO core UPDATE (method 0x0080) is pushed so the
+ * change takes effect immediately.  Always returns 0 (evo_wait() failure is
+ * silently skipped).
+ */
+static int
+nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0;
-       struct dcb_output *dcb;
-       int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
-
-       NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
-       dcb = disp->irq.dcb;
-       if (dcb) {
-               nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
-               disp->irq.dcb = NULL;
-       }
-
-       /* CRTC clock change requested? */
-       crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
-       if (crtc >= 0) {
-               pclk  = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
-               pclk &= 0x003fffff;
-               if (pclk)
-                       nv50_crtc_set_clock(dev, crtc, pclk);
-
-               tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
-               tmp &= ~0x000000f;
-               nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
-       }
-
-       /* Nothing needs to be done for the encoder */
-       crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
-       if (crtc < 0)
-               goto ack;
-       pclk  = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
-
-       /* Find which encoder is connected to the CRTC */
-       for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
-               mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
-               NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
-               if (!(mc & (1 << crtc)))
-                       continue;
-
-               switch ((mc & 0x00000f00) >> 8) {
-               case 0: type = DCB_OUTPUT_ANALOG; break;
-               case 1: type = DCB_OUTPUT_TV; break;
-               default:
-                       NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
-                       goto ack;
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       u32 *push, hue, vib;
+       int adj;
+
+       adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
+       vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
+       hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+
+       push = evo_wait(mast, 16);
+       if (push) {
+               /* pre-NVD0 cores use a 0x400 per-head method stride,
+                * NVD0+ use 0x300 with different method offsets */
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, (hue << 20) | (vib << 8));
+               } else {
+                       evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, (hue << 20) | (vib << 8));
                }
 
-               or = i;
+               if (update) {
+                       evo_mthd(push, 0x0080, 1);
+                       evo_data(push, 0x00000000);
+               }
+               evo_kick(push, mast);
        }
 
-       for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
-               if (nv_device(drm->device)->chipset  < 0x90 ||
-                   nv_device(drm->device)->chipset == 0x92 ||
-                   nv_device(drm->device)->chipset == 0xa0)
-                       mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
-               else
-                       mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
-
-               NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
-               if (!(mc & (1 << crtc)))
-                       continue;
+       return 0;
+}
 
-               switch ((mc & 0x00000f00) >> 8) {
-               case 0: type = DCB_OUTPUT_LVDS; break;
-               case 1: type = DCB_OUTPUT_TMDS; break;
-               case 2: type = DCB_OUTPUT_TMDS; break;
-               case 5: type = DCB_OUTPUT_TMDS; break;
-               case 8: type = DCB_OUTPUT_DP; break;
-               case 9: type = DCB_OUTPUT_DP; break;
-               default:
-                       NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
-                       goto ack;
+/* Point the head's primary layer at framebuffer `fb`, panned to (x, y).
+ * Pushes the bo offset (>> 8: EVO takes 256-byte-aligned addresses), the
+ * fb geometry/pitch/format and the DMA object handle; `update` additionally
+ * pushes a core UPDATE (0x0080).  Caches nvfb->r_dma in nv_crtc->fb.tile_flags
+ * for later use by nv50_crtc_commit().  Always returns 0.
+ */
+static int
+nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
+                   int x, int y, bool update)
+{
+       struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       u32 *push;
+
+       push = evo_wait(mast, 16);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, nvfb->nvbo->bo.offset >> 8);
+                       evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
+                       evo_data(push, (fb->height << 16) | fb->width);
+                       evo_data(push, nvfb->r_pitch);
+                       evo_data(push, nvfb->r_format);
+                       evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, (y << 16) | x);
+                       /* original NV50 core has no per-head fb DMA method */
+                       if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) {
+                               evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+                               evo_data(push, nvfb->r_dma);
+                       }
+               } else {
+                       evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, nvfb->nvbo->bo.offset >> 8);
+                       evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
+                       evo_data(push, (fb->height << 16) | fb->width);
+                       evo_data(push, nvfb->r_pitch);
+                       evo_data(push, nvfb->r_format);
+                       evo_data(push, nvfb->r_dma);
+                       evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, (y << 16) | x);
                }
-
-               or = i;
+               if (update) {
+                       evo_mthd(push, 0x0080, 1);
+                       evo_data(push, 0x00000000);
+               }
+               evo_kick(push, mast);
        }
 
-       if (type == DCB_OUTPUT_ANY)
-               goto ack;
+       nv_crtc->fb.tile_flags = nvfb->r_dma;
+       return 0;
+}
 
-       /* Enable the encoder */
-       for (i = 0; i < drm->vbios.dcb.entries; i++) {
-               dcb = &drm->vbios.dcb.entry[i];
-               if (dcb->type == type && (dcb->or & (1 << or)))
-                       break;
+/* Enable the hardware cursor for this head (0x85000000 = cursor-enable
+ * format word) and point it at the preallocated cursor bo.  NV84+ cores
+ * additionally need the cursor DMA context (NvEvoVRAM) programmed.
+ * Note: no UPDATE is pushed here; nv50_crtc_cursor_show_hide() does that.
+ */
+static void
+nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
+{
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       u32 *push = evo_wait(mast, 16);
+       if (push) {
+               if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, 0x85000000);
+                       evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+               } else
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, 0x85000000);
+                       evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+                       evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, NvEvoVRAM);
+               } else {
+                       evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
+                       evo_data(push, 0x85000000);
+                       evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+                       evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, NvEvoVRAM);
+               }
+               evo_kick(push, mast);
        }
+}
 
-       if (i == drm->vbios.dcb.entries) {
-               NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
-               goto ack;
+/* Disable the hardware cursor (0x05000000 = cursor-disable word) and, on
+ * NV84+ cores, clear the cursor DMA context.  Mirror of
+ * nv50_crtc_cursor_show(); no UPDATE is pushed here either.
+ */
+static void
+nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
+{
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       u32 *push = evo_wait(mast, 16);
+       if (push) {
+               if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x05000000);
+               } else
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x05000000);
+                       evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x00000000);
+               } else {
+                       evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x05000000);
+                       evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x00000000);
+               }
+               evo_kick(push, mast);
        }
+}
 
-       script = nv50_display_script_select(dev, dcb, mc, pclk);
-       nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
-
-       if (type == DCB_OUTPUT_DP) {
-               int link = !(dcb->dpconf.sor.link & 1);
-               if ((mc & 0x000f0000) == 0x00020000)
-                       nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
-               else
-                       nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
+/* Show or hide the hardware cursor; when `update` is set, push a core
+ * UPDATE (0x0080) so the state change is latched immediately.
+ */
+static void
+nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+{
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+
+       if (show)
+               nv50_crtc_cursor_show(nv_crtc);
+       else
+               nv50_crtc_cursor_hide(nv_crtc);
+
+       if (update) {
+               u32 *push = evo_wait(mast, 2);
+               if (push) {
+                       evo_mthd(push, 0x0080, 1);
+                       evo_data(push, 0x00000000);
+                       evo_kick(push, mast);
+               }
        }
+}
 
-       if (dcb->type != DCB_OUTPUT_ANALOG) {
-               tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
-               tmp &= ~0x00000f0f;
-               if (script & 0x0100)
-                       tmp |= 0x00000101;
-               nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
-       } else {
-               nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
-       }
+/* Intentionally empty: per-CRTC DPMS appears to be handled through the
+ * prepare/commit helper pair instead.  NOTE(review): confirm against the
+ * encoder dpms hooks — not visible in this chunk. */
+static void
+nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+/* drm_crtc_helper prepare hook: quiesce the head before a modeset.
+ * Stops any in-flight page flip, blanks the primary layer (clears the fb
+ * DMA handle, sets the blank/disable word) and hides the cursor without
+ * an immediate UPDATE (the following commit will latch everything).
+ */
+static void
+nv50_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct nv50_mast *mast = nv50_mast(crtc->dev);
+       u32 *push;
+
+       nv50_display_flip_stop(crtc);
+
+       push = evo_wait(mast, 2);
+       if (push) {
+               if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x40000000);
+               } else
+               if (nv50_vers(mast) <  NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x40000000);
+                       evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x00000000);
+               } else {
+                       evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x03000000);
+                       evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x00000000);
+               }
 
-       disp->irq.dcb = dcb;
-       disp->irq.pclk = pclk;
-       disp->irq.script = script;
+               evo_kick(push, mast);
+       }
 
-ack:
-       nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
-       nv_wr32(device, 0x610030, 0x80000000);
+       nv50_crtc_cursor_show_hide(nv_crtc, false, false);
 }
 
-/* If programming a TMDS output on a SOR that can also be configured for
- * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
- *
- * It looks like the VBIOS TMDS scripts make an attempt at this, however,
- * the VBIOS scripts on at least one board I have only switch it off on
- * link 0, causing a blank display if the output has previously been
- * programmed for DisplayPort.
- */
+/* drm_crtc_helper commit hook: re-enable the head after a modeset.
+ * Restores the fb DMA handle (cached tile_flags from set_image on NV84+),
+ * re-arms the core channel with the LUT bo, restores cursor visibility
+ * (with an UPDATE) and re-queues page flipping.
+ */
 static void
-nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb)
+nv50_crtc_commit(struct drm_crtc *crtc)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
-       int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct nv50_mast *mast = nv50_mast(crtc->dev);
+       u32 *push;
+
+       push = evo_wait(mast, 32);
+       if (push) {
+               if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, NvEvoVRAM_LP);
+                       evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, 0xc0000000);
+                       evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+               } else
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, nv_crtc->fb.tile_flags);
+                       evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, 0xc0000000);
+                       evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+                       evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, NvEvoVRAM);
+               } else {
+                       evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, nv_crtc->fb.tile_flags);
+                       evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+                       evo_data(push, 0x83000000);
+                       evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+                       evo_data(push, 0x00000000);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, NvEvoVRAM);
+                       evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0xffffff00);
+               }
+
+               evo_kick(push, mast);
+       }
+
+       nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+       nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+}
+
+/* mode_fixup hook: no adjustment needed — the hardware timings are derived
+ * directly in nv50_crtc_mode_set(), so every mode is accepted as-is. */
+static bool
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
+                    struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+/* Pin the new scanout buffer (crtc->fb) into VRAM and unpin the previous
+ * one, if any.  Returns the pin error without touching old_fb on failure,
+ * so the previously displayed buffer stays pinned.
+ */
+static int
+nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+       struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+       int ret;
+
+       ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+       if (ret)
+               return ret;
+
+       if (old_fb) {
+               nvfb = nouveau_framebuffer(old_fb);
+               nouveau_bo_unpin(nvfb->nvbo);
+       }
+
+       return 0;
+}
+
+/* mode_set hook: translate the adjusted DRM mode into EVO head timing
+ * methods, then program dither/scale/vibrance/image without an UPDATE
+ * (the helper's commit will latch everything).
+ *
+ * Horizontal/vertical values are converted from the DRM front/back-porch
+ * representation into the blank-start/blank-end counters the hardware
+ * wants; vertical values are scaled by vscan/ilace for doublescan and
+ * interlaced modes, with vblan2e/vblan2s covering the second field.
+ */
+static int
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+                  struct drm_display_mode *mode, int x, int y,
+                  struct drm_framebuffer *old_fb)
+{
+       struct nv50_mast *mast = nv50_mast(crtc->dev);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct nouveau_connector *nv_connector;
+       u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+       u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+       u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+       u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+       u32 vblan2e = 0, vblan2s = 1;
+       u32 *push;
+       int ret;
+
+       hactive = mode->htotal;
+       hsynce  = mode->hsync_end - mode->hsync_start - 1;
+       hbackp  = mode->htotal - mode->hsync_end;
+       hblanke = hsynce + hbackp;
+       hfrontp = mode->hsync_start - mode->hdisplay;
+       hblanks = mode->htotal - hfrontp - 1;
+
+       vactive = mode->vtotal * vscan / ilace;
+       vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+       vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+       vblanke = vsynce + vbackp;
+       vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+       vblanks = vactive - vfrontp - 1;
+       /* interlaced: program second-field blanking and odd total height */
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               vblan2e = vactive + vsynce + vbackp;
+               vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+               vactive = (vactive * 2) + 1;
+       }
+
+       ret = nv50_crtc_swap_fbs(crtc, old_fb);
+       if (ret)
+               return ret;
+
+       push = evo_wait(mast, 64);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, 0x00800000 | mode->clock);
+                       evo_data(push, (ilace == 2) ? 2 : 0);
+                       evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
+                       evo_data(push, 0x00000000);
+                       evo_data(push, (vactive << 16) | hactive);
+                       evo_data(push, ( vsynce << 16) | hsynce);
+                       evo_data(push, (vblanke << 16) | hblanke);
+                       evo_data(push, (vblanks << 16) | hblanks);
+                       evo_data(push, (vblan2e << 16) | vblan2s);
+                       evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, 0x00000311);
+                       evo_data(push, 0x00000100);
+               } else {
+                       /* NVD0+ takes the pixel clock in Hz, not kHz */
+                       evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
+                       evo_data(push, 0x00000000);
+                       evo_data(push, (vactive << 16) | hactive);
+                       evo_data(push, ( vsynce << 16) | hsynce);
+                       evo_data(push, (vblanke << 16) | hblanke);
+                       evo_data(push, (vblanks << 16) | hblanks);
+                       evo_data(push, (vblan2e << 16) | vblan2s);
+                       evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x00000000); /* ??? */
+                       evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
+                       evo_data(push, mode->clock * 1000);
+                       evo_data(push, 0x00200000); /* ??? */
+                       evo_data(push, mode->clock * 1000);
+                       evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+                       evo_data(push, 0x00000311);
+                       evo_data(push, 0x00000100);
+               }
+
+               evo_kick(push, mast);
+       }
+
+       nv_connector = nouveau_crtc_connector_get(nv_crtc);
+       nv50_crtc_set_dither(nv_crtc, false);
+       nv50_crtc_set_scale(nv_crtc, false);
+       nv50_crtc_set_color_vibrance(nv_crtc, false);
+       nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+       return 0;
+}
+
+/* mode_set_base hook: change scanout buffer / pan offset without a full
+ * modeset.  Swaps pins, stops flipping, programs the new image with an
+ * immediate UPDATE, then re-arms the flip queue.
+ */
+static int
+nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+                       struct drm_framebuffer *old_fb)
+{
+       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       int ret;
+
+       if (!crtc->fb) {
+               NV_DEBUG(drm, "No FB bound\n");
+               return 0;
+       }
+
+       ret = nv50_crtc_swap_fbs(crtc, old_fb);
+       if (ret)
+               return ret;
+
+       nv50_display_flip_stop(crtc);
+       nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+       nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+       return 0;
+}
+
+/* Atomic variant of mode_set_base (e.g. for kernel debug/panic output):
+ * no pinning or flip re-arm — just stop flips and program the new image
+ * with an immediate UPDATE. */
+static int
+nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+                              struct drm_framebuffer *fb, int x, int y,
+                              enum mode_set_atomic state)
+{
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       nv50_display_flip_stop(crtc);
+       nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+       return 0;
+}
+
+/* Copy the software gamma table into the head's LUT buffer object.
+ * Stored 16-bit components are truncated to the hardware's field width
+ * (>> 2).  Entry layout differs by display class: 8 bytes/entry before
+ * NVD0, 32 bytes/entry with a +0x6000 bias on NVD0+.
+ */
+static void
+nv50_crtc_lut_load(struct drm_crtc *crtc)
+{
+       struct nv50_disp *disp = nv50_disp(crtc->dev);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+       int i;
+
+       for (i = 0; i < 256; i++) {
+               u16 r = nv_crtc->lut.r[i] >> 2;
+               u16 g = nv_crtc->lut.g[i] >> 2;
+               u16 b = nv_crtc->lut.b[i] >> 2;
+
+               if (nv_mclass(disp->core) < NVD0_DISP_CLASS) {
+                       writew(r + 0x0000, lut + (i * 0x08) + 0);
+                       writew(g + 0x0000, lut + (i * 0x08) + 2);
+                       writew(b + 0x0000, lut + (i * 0x08) + 4);
+               } else {
+                       writew(r + 0x6000, lut + (i * 0x20) + 0);
+                       writew(g + 0x6000, lut + (i * 0x20) + 2);
+                       writew(b + 0x6000, lut + (i * 0x20) + 4);
+               }
+       }
+}
+
+/* cursor_set hook: only 64x64 cursors are accepted.  The user-supplied GEM
+ * bo is copied word-by-word into the head's private cursor bo (so the
+ * caller's bo need not stay pinned), then visibility is toggled if it
+ * changed.  handle == 0 means "hide cursor".
+ */
+static int
+nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+                    uint32_t handle, uint32_t width, uint32_t height)
+{
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct drm_gem_object *gem;
+       struct nouveau_bo *nvbo;
+       bool visible = (handle != 0);
+       int i, ret = 0;
+
+       if (visible) {
+               if (width != 64 || height != 64)
+                       return -EINVAL;
+
+               gem = drm_gem_object_lookup(dev, file_priv, handle);
+               if (unlikely(!gem))
+                       return -ENOENT;
+               nvbo = nouveau_gem_object(gem);
+
+               ret = nouveau_bo_map(nvbo);
+               if (ret == 0) {
+                       for (i = 0; i < 64 * 64; i++) {
+                               u32 v = nouveau_bo_rd32(nvbo, i);
+                               nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
+                       }
+                       nouveau_bo_unmap(nvbo);
+               }
+
+               drm_gem_object_unreference_unlocked(gem);
+       }
+
+       if (visible != nv_crtc->cursor.visible) {
+               nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
+               nv_crtc->cursor.visible = visible;
+       }
+
+       return ret;
+}
+
+/* cursor_move hook: write the position directly into the per-head cursor
+ * PIO channel (0x0084 = y<<16 | x), then poke 0x0080 to latch it. */
+static int
+nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+       struct nv50_curs *curs = nv50_curs(crtc);
+       struct nv50_chan *chan = nv50_chan(curs);
+       nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff));
+       nv_wo32(chan->user, 0x0080, 0x00000000);
+       return 0;
+}
+
+/* gamma_set hook: copy the caller-supplied table into the software LUT
+ * and reload the hardware LUT bo. */
+static void
+nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+                   uint32_t start, uint32_t size)
+{
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       /* NOTE(review): max() forces end >= 256, so for size < 256 the loop
+        * reads past the caller's r/g/b arrays (and for start+size > 256 it
+        * would overrun lut.r[256]).  min() looks intended — confirm against
+        * the drm_crtc_funcs.gamma_set contract. */
+       u32 end = max(start + size, (u32)256);
+       u32 i;
+
+       for (i = start; i < end; i++) {
+               nv_crtc->lut.r[i] = r[i];
+               nv_crtc->lut.g[i] = g[i];
+               nv_crtc->lut.b[i] = b[i];
+       }
+
+       nv50_crtc_lut_load(crtc);
+}
+
+/* Tear down a head: release its overlay/overlay-immediate/sync/cursor
+ * channels, then unmap/unpin/release the cursor and LUT buffer objects.
+ * NOTE(review): nouveau_bo_unmap() is called before the NULL check that
+ * guards the unpin — assumes unmap tolerates a NULL bo; confirm. */
+static void
+nv50_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct nv50_disp *disp = nv50_disp(crtc->dev);
+       struct nv50_head *head = nv50_head(crtc);
+       nv50_dmac_destroy(disp->core, &head->ovly.base);
+       nv50_pioc_destroy(disp->core, &head->oimm.base);
+       nv50_dmac_destroy(disp->core, &head->sync.base);
+       nv50_pioc_destroy(disp->core, &head->curs.base);
+       nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+       if (nv_crtc->cursor.nvbo)
+               nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+       nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+       nouveau_bo_unmap(nv_crtc->lut.nvbo);
+       if (nv_crtc->lut.nvbo)
+               nouveau_bo_unpin(nv_crtc->lut.nvbo);
+       nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+       drm_crtc_cleanup(crtc);
+       kfree(crtc);
+}
+
+/* Modeset helper vtable for nv50+ heads (drm_crtc_helper_funcs). */
+static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
+       .dpms = nv50_crtc_dpms,
+       .prepare = nv50_crtc_prepare,
+       .commit = nv50_crtc_commit,
+       .mode_fixup = nv50_crtc_mode_fixup,
+       .mode_set = nv50_crtc_mode_set,
+       .mode_set_base = nv50_crtc_mode_set_base,
+       .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
+       .load_lut = nv50_crtc_lut_load,
+};
+
+/* Core CRTC vtable for nv50+ heads (drm_crtc_funcs); page flips go
+ * through the shared nouveau_crtc_page_flip path. */
+static const struct drm_crtc_funcs nv50_crtc_func = {
+       .cursor_set = nv50_crtc_cursor_set,
+       .cursor_move = nv50_crtc_cursor_move,
+       .gamma_set = nv50_crtc_gamma_set,
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = nv50_crtc_destroy,
+       .page_flip = nouveau_crtc_page_flip,
+};
+
+/* No-op stub for the nouveau_crtc cursor.set_pos callback — positioning
+ * is done through nv50_crtc_cursor_move() on this hardware. */
+static void
+nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+}
+
+/* No-op stub for the nouveau_crtc cursor.set_offset callback — the cursor
+ * bo offset is programmed in nv50_crtc_cursor_show() instead. */
+static void
+nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+}
+
+static int
+nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
+{
+       struct nv50_disp *disp = nv50_disp(dev);
+       struct nv50_head *head;
+       struct drm_crtc *crtc;
+       int ret, i;
+
+       head = kzalloc(sizeof(*head), GFP_KERNEL);
+       if (!head)
+               return -ENOMEM;
+
+       head->base.index = index;
+       head->base.set_dither = nv50_crtc_set_dither;
+       head->base.set_scale = nv50_crtc_set_scale;
+       head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
+       head->base.color_vibrance = 50;
+       head->base.vibrant_hue = 0;
+       head->base.cursor.set_offset = nv50_cursor_set_offset;
+       head->base.cursor.set_pos = nv50_cursor_set_pos;
+       for (i = 0; i < 256; i++) {
+               head->base.lut.r[i] = i << 8;
+               head->base.lut.g[i] = i << 8;
+               head->base.lut.b[i] = i << 8;
+       }
+
+       crtc = &head->base.base;
+       drm_crtc_init(dev, crtc, &nv50_crtc_func);
+       drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+       drm_mode_crtc_set_gamma_size(crtc, 256);
+
+       ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+                            0, 0x0000, NULL, &head->base.lut.nvbo);
+       if (!ret) {
+               ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+               if (!ret) {
+                       ret = nouveau_bo_map(head->base.lut.nvbo);
+                       if (ret)
+                               nouveau_bo_unpin(head->base.lut.nvbo);
+               }
+               if (ret)
+                       nouveau_bo_ref(NULL, &head->base.lut.nvbo);
+       }
+
+       if (ret)
+               goto out;
+
+       nv50_crtc_lut_load(crtc);
+
+       /* allocate cursor resources */
+       ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index,
+                             &(struct nv50_display_curs_class) {
+                                       .head = index,
+                             }, sizeof(struct nv50_display_curs_class),
+                             &head->curs.base);
+       if (ret)
+               goto out;
+
+       ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
+                            0, 0x0000, NULL, &head->base.cursor.nvbo);
+       if (!ret) {
+               ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
+               if (!ret) {
+                       ret = nouveau_bo_map(head->base.cursor.nvbo);
+                       if (ret)
+                               nouveau_bo_unpin(head->base.cursor.nvbo);
+               }
+               if (ret)
+                       nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
+       }
+
+       if (ret)
+               goto out;
+
+       /* allocate page flip / sync resources */
+       ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index,
+                             &(struct nv50_display_sync_class) {
+                                       .pushbuf = EVO_PUSH_HANDLE(SYNC, index),
+                                       .head = index,
+                             }, sizeof(struct nv50_display_sync_class),
+                             disp->sync->bo.offset, &head->sync.base);
+       if (ret)
+               goto out;
+
+       head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+
+       /* allocate overlay resources */
+       ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
+                             &(struct nv50_display_oimm_class) {
+                                       .head = index,
+                             }, sizeof(struct nv50_display_oimm_class),
+                             &head->oimm.base);
+       if (ret)
+               goto out;
+
+       ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index,
+                             &(struct nv50_display_ovly_class) {
+                                       .pushbuf = EVO_PUSH_HANDLE(OVLY, index),
+                                       .head = index,
+                             }, sizeof(struct nv50_display_ovly_class),
+                             disp->sync->bo.offset, &head->ovly.base);
+       if (ret)
+               goto out;
+
+out:
+       if (ret)
+               nv50_crtc_destroy(crtc);
+       return ret;
+}
+
+/******************************************************************************
+ * DAC
+ *****************************************************************************/
+static void
+nv50_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+       int or = nv_encoder->or;
+       u32 dpms_ctrl;
+
+       dpms_ctrl = 0x00000000;
+       if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
+               dpms_ctrl |= 0x00000001;
+       if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
+               dpms_ctrl |= 0x00000004;
+
+       nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl);
+}
+
+static bool
+nv50_dac_mode_fixup(struct drm_encoder *encoder,
+                   const struct drm_display_mode *mode,
+                   struct drm_display_mode *adjusted_mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_connector *nv_connector;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (nv_connector && nv_connector->native_mode) {
+               if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+                       int id = adjusted_mode->base.id;
+                       *adjusted_mode = *nv_connector->native_mode;
+                       adjusted_mode->base.id = id;
+               }
+       }
+
+       return true;
+}
+
+static void
+nv50_dac_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+                 struct drm_display_mode *adjusted_mode)
+{
+       struct nv50_mast *mast = nv50_mast(encoder->dev);
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+       u32 *push;
+
+       nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+       push = evo_wait(mast, 8);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       u32 syncs = 0x00000000;
+
+                       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+                               syncs |= 0x00000001;
+                       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+                               syncs |= 0x00000002;
+
+                       evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
+                       evo_data(push, 1 << nv_crtc->index);
+                       evo_data(push, syncs);
+               } else {
+                       u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+                       u32 syncs = 0x00000001;
+
+                       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+                               syncs |= 0x00000008;
+                       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+                               syncs |= 0x00000010;
+
+                       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                               magic |= 0x00000001;
+
+                       evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+                       evo_data(push, syncs);
+                       evo_data(push, magic);
+                       evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
+                       evo_data(push, 1 << nv_crtc->index);
+               }
+
+               evo_kick(push, mast);
+       }
+
+       nv_encoder->crtc = encoder->crtc;
+}
+
+static void
+nv50_dac_disconnect(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nv50_mast *mast = nv50_mast(encoder->dev);
+       const int or = nv_encoder->or;
+       u32 *push;
+
+       if (nv_encoder->crtc) {
+               nv50_crtc_prepare(nv_encoder->crtc);
+
+               push = evo_wait(mast, 4);
+               if (push) {
+                       if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                               evo_mthd(push, 0x0400 + (or * 0x080), 1);
+                               evo_data(push, 0x00000000);
+                       } else {
+                               evo_mthd(push, 0x0180 + (or * 0x020), 1);
+                               evo_data(push, 0x00000000);
+                       }
+
+                       evo_mthd(push, 0x0080, 1);
+                       evo_data(push, 0x00000000);
+                       evo_kick(push, mast);
+               }
+       }
+
+       nv_encoder->crtc = NULL;
+}
+
+static enum drm_connector_status
+nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+       int ret, or = nouveau_encoder(encoder)->or;
+       u32 load = 0;
+
+       ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
+       if (ret || load != 7)
+               return connector_status_disconnected;
+
+       return connector_status_connected;
+}
+
+static void
+nv50_dac_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+       kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
+       .dpms = nv50_dac_dpms,
+       .mode_fixup = nv50_dac_mode_fixup,
+       .prepare = nv50_dac_disconnect,
+       .commit = nv50_dac_commit,
+       .mode_set = nv50_dac_mode_set,
+       .disable = nv50_dac_disconnect,
+       .get_crtc = nv50_display_crtc_get,
+       .detect = nv50_dac_detect
+};
+
+static const struct drm_encoder_funcs nv50_dac_func = {
+       .destroy = nv50_dac_destroy,
+};
+
+static int
+nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+       struct drm_device *dev = connector->dev;
+       struct nouveau_encoder *nv_encoder;
        struct drm_encoder *encoder;
-       u32 tmp;
 
-       if (dcb->type != DCB_OUTPUT_TMDS)
+       nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+       if (!nv_encoder)
+               return -ENOMEM;
+       nv_encoder->dcb = dcbe;
+       nv_encoder->or = ffs(dcbe->or) - 1;
+
+       encoder = to_drm_encoder(nv_encoder);
+       encoder->possible_crtcs = dcbe->heads;
+       encoder->possible_clones = 0;
+       drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC);
+       drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
+
+       drm_mode_connector_attach_encoder(connector, encoder);
+       return 0;
+}
+
+/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_connector *nv_connector;
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (!drm_detect_monitor_audio(nv_connector->edid))
                return;
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+
+       nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or,
+                           nv_connector->base.eld,
+                           nv_connector->base.eld[2] * 4);
+}
+
+static void
+nv50_audio_disconnect(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+       nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+       struct nouveau_connector *nv_connector;
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+       const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+       u32 rekey = 56; /* binary driver, and tegra constant */
+       u32 max_ac_packet;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (!drm_detect_hdmi_monitor(nv_connector->edid))
+               return;
+
+       max_ac_packet  = mode->htotal - mode->hdisplay;
+       max_ac_packet -= rekey;
+       max_ac_packet -= 18; /* constant from tegra */
+       max_ac_packet /= 32;
+
+       nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
+                           NV84_DISP_SOR_HDMI_PWR_STATE_ON |
+                           (max_ac_packet << 16) | rekey);
+
+       nv50_audio_mode_set(encoder, mode);
+}
+
+static void
+nv50_hdmi_disconnect(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+       const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+
+       nv50_audio_disconnect(encoder);
+
+       nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000);
+}
+
+/******************************************************************************
+ * SOR
+ *****************************************************************************/
+static void
+nv50_sor_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct drm_device *dev = encoder->dev;
+       struct nv50_disp *disp = nv50_disp(dev);
+       struct drm_encoder *partner;
+       int or = nv_encoder->or;
+
+       nv_encoder->last_dpms = mode;
 
-               if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
-                   nv_encoder->dcb->or & (1 << or)) {
-                       tmp  = nv_rd32(device, NV50_SOR_DP_CTRL(or, link));
-                       tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
-                       nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp);
+       list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
+               struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
+
+               if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
+                       continue;
+
+               if (nv_partner != nv_encoder &&
+                   nv_partner->dcb->or == nv_encoder->dcb->or) {
+                       if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
+                               return;
                        break;
                }
        }
+
+       nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
+
+       if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+               nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
+}
+
+static bool
+nv50_sor_mode_fixup(struct drm_encoder *encoder,
+                   const struct drm_display_mode *mode,
+                   struct drm_display_mode *adjusted_mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_connector *nv_connector;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (nv_connector && nv_connector->native_mode) {
+               if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+                       int id = adjusted_mode->base.id;
+                       *adjusted_mode = *nv_connector->native_mode;
+                       adjusted_mode->base.id = id;
+               }
+       }
+
+       return true;
 }
 
 static void
-nv50_display_unk40_handler(struct drm_device *dev)
+nv50_sor_disconnect(struct drm_encoder *encoder)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       struct dcb_output *dcb = disp->irq.dcb;
-       u16 script = disp->irq.script;
-       u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk;
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nv50_mast *mast = nv50_mast(encoder->dev);
+       const int or = nv_encoder->or;
+       u32 *push;
+
+       if (nv_encoder->crtc) {
+               nv50_crtc_prepare(nv_encoder->crtc);
+
+               push = evo_wait(mast, 4);
+               if (push) {
+                       if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                               evo_mthd(push, 0x0600 + (or * 0x40), 1);
+                               evo_data(push, 0x00000000);
+                       } else {
+                               evo_mthd(push, 0x0200 + (or * 0x20), 1);
+                               evo_data(push, 0x00000000);
+                       }
 
-       NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
-       disp->irq.dcb = NULL;
-       if (!dcb)
-               goto ack;
+                       evo_mthd(push, 0x0080, 1);
+                       evo_data(push, 0x00000000);
+                       evo_kick(push, mast);
+               }
 
-       nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1);
-       nv50_display_unk40_dp_set_tmds(dev, dcb);
+               nv50_hdmi_disconnect(encoder);
+       }
 
-ack:
-       nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
-       nv_wr32(device, 0x610030, 0x80000000);
-       nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8);
+       nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+       nv_encoder->crtc = NULL;
 }
 
 static void
-nv50_display_bh(unsigned long data)
+nv50_sor_prepare(struct drm_encoder *encoder)
 {
-       struct drm_device *dev = (struct drm_device *)data;
-       struct nouveau_device *device = nouveau_dev(dev);
+       nv50_sor_disconnect(encoder);
+       if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
+               evo_sync(encoder->dev);
+}
+
+static void
+nv50_sor_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+                 struct drm_display_mode *mode)
+{
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+       struct nv50_mast *mast = nv50_mast(encoder->dev);
+       struct drm_device *dev = encoder->dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+       struct nouveau_connector *nv_connector;
+       struct nvbios *bios = &drm->vbios;
+       u32 *push, lvds = 0;
+       u8 owner = 1 << nv_crtc->index;
+       u8 proto = 0xf;
+       u8 depth = 0x0;
 
-       for (;;) {
-               uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
-               uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       switch (nv_encoder->dcb->type) {
+       case DCB_OUTPUT_TMDS:
+               if (nv_encoder->dcb->sorconf.link & 1) {
+                       if (mode->clock < 165000)
+                               proto = 0x1;
+                       else
+                               proto = 0x5;
+               } else {
+                       proto = 0x2;
+               }
+
+               nv50_hdmi_mode_set(encoder, mode);
+               break;
+       case DCB_OUTPUT_LVDS:
+               proto = 0x0;
 
-               NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
+               if (bios->fp_no_ddc) {
+                       if (bios->fp.dual_link)
+                               lvds |= 0x0100;
+                       if (bios->fp.if_is_24bit)
+                               lvds |= 0x0200;
+               } else {
+                       if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+                               if (((u8 *)nv_connector->edid)[121] == 2)
+                                       lvds |= 0x0100;
+                       } else
+                       if (mode->clock >= bios->fp.duallink_transition_clk) {
+                               lvds |= 0x0100;
+                       }
 
-               if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
-                       nv50_display_unk10_handler(dev);
-               else
-               if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
-                       nv50_display_unk20_handler(dev);
-               else
-               if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
-                       nv50_display_unk40_handler(dev);
+                       if (lvds & 0x0100) {
+                               if (bios->fp.strapless_is_24bit & 2)
+                                       lvds |= 0x0200;
+                       } else {
+                               if (bios->fp.strapless_is_24bit & 1)
+                                       lvds |= 0x0200;
+                       }
+
+                       if (nv_connector->base.display_info.bpc == 8)
+                               lvds |= 0x0200;
+               }
+
+               nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds);
+               break;
+       case DCB_OUTPUT_DP:
+               if (nv_connector->base.display_info.bpc == 6) {
+                       nv_encoder->dp.datarate = mode->clock * 18 / 8;
+                       depth = 0x2;
+               } else
+               if (nv_connector->base.display_info.bpc == 8) {
+                       nv_encoder->dp.datarate = mode->clock * 24 / 8;
+                       depth = 0x5;
+               } else {
+                       nv_encoder->dp.datarate = mode->clock * 30 / 8;
+                       depth = 0x6;
+               }
+
+               if (nv_encoder->dcb->sorconf.link & 1)
+                       proto = 0x8;
                else
-                       break;
+                       proto = 0x9;
+               break;
+       default:
+               BUG_ON(1);
+               break;
        }
 
-       nv_wr32(device, NV03_PMC_INTR_EN_0, 1);
+       nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
+       push = evo_wait(nv50_mast(dev), 8);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_CLASS) {
+                       evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
+                       evo_data(push, (depth << 16) | (proto << 8) | owner);
+               } else {
+                       u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+                       u32 syncs = 0x00000001;
+
+                       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+                               syncs |= 0x00000008;
+                       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+                               syncs |= 0x00000010;
+
+                       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                               magic |= 0x00000001;
+
+                       evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+                       evo_data(push, syncs | (depth << 6));
+                       evo_data(push, magic);
+                       evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1);
+                       evo_data(push, owner | (proto << 8));
+               }
+
+               evo_kick(push, mast);
+       }
+
+       nv_encoder->crtc = encoder->crtc;
 }
 
 static void
-nv50_display_error_handler(struct drm_device *dev)
+nv50_sor_destroy(struct drm_encoder *encoder)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
-       u32 addr, data;
-       int chid;
+       drm_encoder_cleanup(encoder);
+       kfree(encoder);
+}
 
-       for (chid = 0; chid < 5; chid++) {
-               if (!(channels & (1 << chid)))
-                       continue;
+static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
+       .dpms = nv50_sor_dpms,
+       .mode_fixup = nv50_sor_mode_fixup,
+       .prepare = nv50_sor_prepare,
+       .commit = nv50_sor_commit,
+       .mode_set = nv50_sor_mode_set,
+       .disable = nv50_sor_disconnect,
+       .get_crtc = nv50_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nv50_sor_func = {
+       .destroy = nv50_sor_destroy,
+};
+
+static int
+nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+       struct drm_device *dev = connector->dev;
+       struct nouveau_encoder *nv_encoder;
+       struct drm_encoder *encoder;
+
+       nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+       if (!nv_encoder)
+               return -ENOMEM;
+       nv_encoder->dcb = dcbe;
+       nv_encoder->or = ffs(dcbe->or) - 1;
+       nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
 
-               nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
-               addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid));
-               data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid));
-               NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x "
-                             "(0x%04x 0x%02x)\n", chid,
-                        addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
+       encoder = to_drm_encoder(nv_encoder);
+       encoder->possible_crtcs = dcbe->heads;
+       encoder->possible_clones = 0;
+       drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS);
+       drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
+
+       drm_mode_connector_attach_encoder(connector, encoder);
+       return 0;
+}
+
+/******************************************************************************
+ * Init
+ *****************************************************************************/
+void
+nv50_display_fini(struct drm_device *dev)
+{
+}
 
-               nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
+int
+nv50_display_init(struct drm_device *dev)
+{
+       u32 *push = evo_wait(nv50_mast(dev), 32);
+       if (push) {
+               evo_mthd(push, 0x0088, 1);
+               evo_data(push, NvEvoSync);
+               evo_kick(push, nv50_mast(dev));
+               return evo_sync(dev);
        }
+
+       return -EBUSY;
 }
 
 void
-nv50_display_intr(struct drm_device *dev)
+nv50_display_destroy(struct drm_device *dev)
 {
+       struct nv50_disp *disp = nv50_disp(dev);
+
+       nv50_dmac_destroy(disp->core, &disp->mast.base);
+
+       nouveau_bo_unmap(disp->sync);
+       if (disp->sync)
+               nouveau_bo_unpin(disp->sync);
+       nouveau_bo_ref(NULL, &disp->sync);
+
+       nouveau_display(dev)->priv = NULL;
+       kfree(disp);
+}
+
+int
+nv50_display_create(struct drm_device *dev)
+{
+       static const u16 oclass[] = {
+               NVE0_DISP_CLASS,
+               NVD0_DISP_CLASS,
+               NVA3_DISP_CLASS,
+               NV94_DISP_CLASS,
+               NVA0_DISP_CLASS,
+               NV84_DISP_CLASS,
+               NV50_DISP_CLASS,
+       };
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       uint32_t delayed = 0;
-
-       while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
-               uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
-               uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
-               uint32_t clock;
+       struct dcb_table *dcb = &drm->vbios.dcb;
+       struct drm_connector *connector, *tmp;
+       struct nv50_disp *disp;
+       struct dcb_output *dcbe;
+       int crtcs, ret, i;
 
-               NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+       disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+       if (!disp)
+               return -ENOMEM;
 
-               if (!intr0 && !(intr1 & ~delayed))
-                       break;
+       nouveau_display(dev)->priv = disp;
+       nouveau_display(dev)->dtor = nv50_display_destroy;
+       nouveau_display(dev)->init = nv50_display_init;
+       nouveau_display(dev)->fini = nv50_display_fini;
 
-               if (intr0 & 0x001f0000) {
-                       nv50_display_error_handler(dev);
-                       intr0 &= ~0x001f0000;
+       /* small shared memory area we use for notifiers and semaphores */
+       ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+                            0, 0x0000, NULL, &disp->sync);
+       if (!ret) {
+               ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+               if (!ret) {
+                       ret = nouveau_bo_map(disp->sync);
+                       if (ret)
+                               nouveau_bo_unpin(disp->sync);
                }
+               if (ret)
+                       nouveau_bo_ref(NULL, &disp->sync);
+       }
 
-               if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
-                       intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
-                       delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
-               }
+       if (ret)
+               goto out;
+
+       /* attempt to allocate a supported evo display class */
+       ret = -ENODEV;
+       for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
+               ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+                                        0xd1500000, oclass[i], NULL, 0,
+                                        &disp->core);
+       }
 
-               clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
-                                 NV50_PDISPLAY_INTR_1_CLK_UNK20 |
-                                 NV50_PDISPLAY_INTR_1_CLK_UNK40));
-               if (clock) {
-                       nv_wr32(device, NV03_PMC_INTR_EN_0, 0);
-                       tasklet_schedule(&disp->tasklet);
-                       delayed |= clock;
-                       intr1 &= ~clock;
-               }
+       if (ret)
+               goto out;
+
+       /* allocate master evo channel */
+       ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
+                             &(struct nv50_display_mast_class) {
+                                       .pushbuf = EVO_PUSH_HANDLE(MAST, 0),
+                             }, sizeof(struct nv50_display_mast_class),
+                             disp->sync->bo.offset, &disp->mast.base);
+       if (ret)
+               goto out;
+
+       /* create crtc objects to represent the hw heads */
+       if (nv_mclass(disp->core) >= NVD0_DISP_CLASS)
+               crtcs = nv_rd32(device, 0x022448);
+       else
+               crtcs = 2;
 
-               if (intr0) {
-                       NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
-                       nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0);
+       for (i = 0; i < crtcs; i++) {
+               ret = nv50_crtc_create(dev, disp->core, i);
+               if (ret)
+                       goto out;
+       }
+
+       /* create encoder/connector objects based on VBIOS DCB table */
+       for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
+               connector = nouveau_connector_create(dev, dcbe->connector);
+               if (IS_ERR(connector))
+                       continue;
+
+               if (dcbe->location != DCB_LOC_ON_CHIP) {
+                       NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
+                               dcbe->type, ffs(dcbe->or) - 1);
+                       continue;
                }
 
-               if (intr1) {
-                       NV_ERROR(drm,
-                                "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
-                       nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1);
+               switch (dcbe->type) {
+               case DCB_OUTPUT_TMDS:
+               case DCB_OUTPUT_LVDS:
+               case DCB_OUTPUT_DP:
+                       nv50_sor_create(connector, dcbe);
+                       break;
+               case DCB_OUTPUT_ANALOG:
+                       nv50_dac_create(connector, dcbe);
+                       break;
+               default:
+                       NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
+                               dcbe->type, ffs(dcbe->or) - 1);
+                       continue;
                }
        }
+
+       /* cull any connectors we created that don't have an encoder */
+       list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
+               if (connector->encoder_ids[0])
+                       continue;
+
+               NV_WARN(drm, "%s has no encoders, removing\n",
+                       drm_get_connector_name(connector));
+               connector->funcs->destroy(connector);
+       }
+
+out:
+       if (ret)
+               nv50_display_destroy(dev);
+       return ret;
 }
index 973554d..70da347 100644 (file)
 #include "nouveau_display.h"
 #include "nouveau_crtc.h"
 #include "nouveau_reg.h"
-#include "nv50_evo.h"
 
-struct nv50_display_crtc {
-       struct nouveau_channel *sync;
-       struct {
-               struct nouveau_bo *bo;
-               u32 offset;
-               u16 value;
-       } sem;
-};
+int  nv50_display_create(struct drm_device *);
+void nv50_display_destroy(struct drm_device *);
+int  nv50_display_init(struct drm_device *);
+void nv50_display_fini(struct drm_device *);
 
-struct nv50_display {
-       struct nouveau_channel *master;
-
-       struct nouveau_gpuobj *ramin;
-       u32 dmao;
-       u32 hash;
-
-       struct nv50_display_crtc crtc[2];
-
-       struct tasklet_struct tasklet;
-       struct {
-               struct dcb_output *dcb;
-               u16 script;
-               u32 pclk;
-       } irq;
-};
-
-static inline struct nv50_display *
-nv50_display(struct drm_device *dev)
-{
-       return nouveau_display(dev)->priv;
-}
-
-int nv50_display_early_init(struct drm_device *dev);
-void nv50_display_late_takedown(struct drm_device *dev);
-int nv50_display_create(struct drm_device *dev);
-int nv50_display_init(struct drm_device *dev);
-void nv50_display_fini(struct drm_device *dev);
-void nv50_display_destroy(struct drm_device *dev);
-void nv50_display_intr(struct drm_device *);
-int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
-int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
-
-u32  nv50_display_active_crtcs(struct drm_device *);
-
-int  nv50_display_sync(struct drm_device *);
-int  nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
-                           struct nouveau_channel *chan);
 void nv50_display_flip_stop(struct drm_crtc *);
-
-int  nv50_evo_create(struct drm_device *dev);
-void nv50_evo_destroy(struct drm_device *dev);
-int  nv50_evo_init(struct drm_device *dev);
-void nv50_evo_fini(struct drm_device *dev);
-void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
-                         u64 size);
-int  nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
-                        u64 base, u64 size, struct nouveau_gpuobj **);
-
-int  nvd0_display_create(struct drm_device *);
-void nvd0_display_destroy(struct drm_device *);
-int  nvd0_display_init(struct drm_device *);
-void nvd0_display_fini(struct drm_device *);
-void nvd0_display_intr(struct drm_device *);
-
-void nvd0_display_flip_stop(struct drm_crtc *);
-int  nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+int  nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
                            struct nouveau_channel *, u32 swap_interval);
 
 struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
-struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head);
 
 #endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
deleted file mode 100644 (file)
index 9f6f55c..0000000
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32
-nv50_evo_rd32(struct nouveau_object *object, u32 addr)
-{
-       void __iomem *iomem = object->oclass->ofuncs->rd08;
-       return ioread32_native(iomem + addr);
-}
-
-static void
-nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
-{
-       void __iomem *iomem = object->oclass->ofuncs->rd08;
-       iowrite32_native(data, iomem + addr);
-}
-
-static void
-nv50_evo_channel_del(struct nouveau_channel **pevo)
-{
-       struct nouveau_channel *evo = *pevo;
-
-       if (!evo)
-               return;
-       *pevo = NULL;
-
-       nouveau_bo_unmap(evo->push.buffer);
-       nouveau_bo_ref(NULL, &evo->push.buffer);
-
-       if (evo->object)
-               iounmap(evo->object->oclass->ofuncs);
-
-       kfree(evo);
-}
-
-int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
-                   u64 base, u64 size, struct nouveau_gpuobj **pobj)
-{
-       struct drm_device *dev = evo->fence;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       u32 dmao = disp->dmao;
-       u32 hash = disp->hash;
-       u32 flags5;
-
-       if (nv_device(drm->device)->chipset < 0xc0) {
-               /* not supported on 0x50, specified in format mthd */
-               if (nv_device(drm->device)->chipset == 0x50)
-                       memtype = 0;
-               flags5 = 0x00010000;
-       } else {
-               if (memtype & 0x80000000)
-                       flags5 = 0x00000000; /* large pages */
-               else
-                       flags5 = 0x00020000;
-       }
-
-       nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
-       nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
-       nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
-       nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
-                                         upper_32_bits(base));
-       nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
-       nv_wo32(disp->ramin, dmao + 0x14, flags5);
-
-       nv_wo32(disp->ramin, hash + 0x00, handle);
-       nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
-                                          evo->handle);
-
-       disp->dmao += 0x20;
-       disp->hash += 0x08;
-       return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, int chid,
-                    struct nouveau_channel **pevo)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       struct nouveau_channel *evo;
-       int ret;
-
-       evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
-       if (!evo)
-               return -ENOMEM;
-       *pevo = evo;
-
-       evo->drm = drm;
-       evo->handle = chid;
-       evo->fence = dev;
-       evo->user_get = 4;
-       evo->user_put = 0;
-
-       ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
-                            &evo->push.buffer);
-       if (ret == 0)
-               ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
-       if (ret) {
-               NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
-               nv50_evo_channel_del(pevo);
-               return ret;
-       }
-
-       ret = nouveau_bo_map(evo->push.buffer);
-       if (ret) {
-               NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
-               nv50_evo_channel_del(pevo);
-               return ret;
-       }
-
-       evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
-#ifdef NOUVEAU_OBJECT_MAGIC
-       evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
-#endif
-       evo->object->parent = nv_object(disp->ramin)->parent;
-       evo->object->engine = nv_object(disp->ramin)->engine;
-       evo->object->oclass =
-               kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
-       evo->object->oclass->ofuncs =
-               kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
-       evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
-       evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
-       evo->object->oclass->ofuncs->rd08 =
-               ioremap(pci_resource_start(dev->pdev, 0) +
-                       NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
-       return 0;
-}
-
-static int
-nv50_evo_channel_init(struct nouveau_channel *evo)
-{
-       struct nouveau_drm *drm = evo->drm;
-       struct nouveau_device *device = nv_device(drm->device);
-       int id = evo->handle, ret, i;
-       u64 pushbuf = evo->push.buffer->bo.offset;
-       u32 tmp;
-
-       tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
-       if ((tmp & 0x009f0000) == 0x00020000)
-               nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
-
-       tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
-       if ((tmp & 0x003f0000) == 0x00030000)
-               nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
-
-       /* initialise fifo */
-       nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
-                    NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
-                    NV50_PDISPLAY_EVO_DMA_CB_VALID);
-       nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
-       nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
-       nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
-                    NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
-
-       nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
-       nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
-                    NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
-       if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
-               NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
-                        nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
-               return -EBUSY;
-       }
-
-       /* enable error reporting on the channel */
-       nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);
-
-       evo->dma.max = (4096/4) - 2;
-       evo->dma.max &= ~7;
-       evo->dma.put = 0;
-       evo->dma.cur = evo->dma.put;
-       evo->dma.free = evo->dma.max - evo->dma.cur;
-
-       ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
-               OUT_RING(evo, 0);
-
-       return 0;
-}
-
-static void
-nv50_evo_channel_fini(struct nouveau_channel *evo)
-{
-       struct nouveau_drm *drm = evo->drm;
-       struct nouveau_device *device = nv_device(drm->device);
-       int id = evo->handle;
-
-       nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
-       nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
-       nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
-       nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
-       if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
-               NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
-                        nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
-       }
-}
-
-void
-nv50_evo_destroy(struct drm_device *dev)
-{
-       struct nv50_display *disp = nv50_display(dev);
-       int i;
-
-       for (i = 0; i < 2; i++) {
-               if (disp->crtc[i].sem.bo) {
-                       nouveau_bo_unmap(disp->crtc[i].sem.bo);
-                       nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
-               }
-               nv50_evo_channel_del(&disp->crtc[i].sync);
-       }
-       nv50_evo_channel_del(&disp->master);
-       nouveau_gpuobj_ref(NULL, &disp->ramin);
-}
-
-int
-nv50_evo_create(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fb *pfb = nouveau_fb(drm->device);
-       struct nv50_display *disp = nv50_display(dev);
-       struct nouveau_channel *evo;
-       int ret, i, j;
-
-       /* setup object management on it, any other evo channel will
-        * use this also as there's no per-channel support on the
-        * hardware
-        */
-       ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
-                                NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
-       if (ret) {
-               NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
-               goto err;
-       }
-
-       disp->hash = 0x0000;
-       disp->dmao = 0x1000;
-
-       /* create primary evo channel, the one we use for modesetting
-        * purporses
-        */
-       ret = nv50_evo_channel_new(dev, 0, &disp->master);
-       if (ret)
-               return ret;
-       evo = disp->master;
-
-       ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
-                                 disp->ramin->addr + 0x2000, 0x1000, NULL);
-       if (ret)
-               goto err;
-
-       /* create some default objects for the scanout memtypes we support */
-       ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
-                                 0, pfb->ram.size, NULL);
-       if (ret)
-               goto err;
-
-       ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
-                                 0, pfb->ram.size, NULL);
-       if (ret)
-               goto err;
-
-       ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
-                                 (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
-                                 0, pfb->ram.size, NULL);
-       if (ret)
-               goto err;
-
-       ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
-                                 (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
-                                 0, pfb->ram.size, NULL);
-       if (ret)
-               goto err;
-
-       /* create "display sync" channels and other structures we need
-        * to implement page flipping
-        */
-       for (i = 0; i < 2; i++) {
-               struct nv50_display_crtc *dispc = &disp->crtc[i];
-               u64 offset;
-
-               ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
-               if (ret)
-                       goto err;
-
-               ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-                                    0, 0x0000, NULL, &dispc->sem.bo);
-               if (!ret) {
-                       ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
-                       if (!ret)
-                               ret = nouveau_bo_map(dispc->sem.bo);
-                       if (ret)
-                               nouveau_bo_ref(NULL, &dispc->sem.bo);
-                       offset = dispc->sem.bo->bo.offset;
-               }
-
-               if (ret)
-                       goto err;
-
-               ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
-                                         offset, 4096, NULL);
-               if (ret)
-                       goto err;
-
-               ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
-                                         0, pfb->ram.size, NULL);
-               if (ret)
-                       goto err;
-
-               ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
-                                         (nv_device(drm->device)->chipset < 0xc0 ?
-                                         0x7a : 0xfe),
-                                         0, pfb->ram.size, NULL);
-               if (ret)
-                       goto err;
-
-               ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
-                                         (nv_device(drm->device)->chipset < 0xc0 ?
-                                         0x70 : 0xfe),
-                                         0, pfb->ram.size, NULL);
-               if (ret)
-                       goto err;
-
-               for (j = 0; j < 4096; j += 4)
-                       nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
-               dispc->sem.offset = 0;
-       }
-
-       return 0;
-
-err:
-       nv50_evo_destroy(dev);
-       return ret;
-}
-
-int
-nv50_evo_init(struct drm_device *dev)
-{
-       struct nv50_display *disp = nv50_display(dev);
-       int ret, i;
-
-       ret = nv50_evo_channel_init(disp->master);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < 2; i++) {
-               ret = nv50_evo_channel_init(disp->crtc[i].sync);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-void
-nv50_evo_fini(struct drm_device *dev)
-{
-       struct nv50_display *disp = nv50_display(dev);
-       int i;
-
-       for (i = 0; i < 2; i++) {
-               if (disp->crtc[i].sync)
-                       nv50_evo_channel_fini(disp->crtc[i].sync);
-       }
-
-       if (disp->master)
-               nv50_evo_channel_fini(disp->master);
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
deleted file mode 100644 (file)
index 771d879..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NV50_EVO_H__
-#define __NV50_EVO_H__
-
-#define NV50_EVO_UPDATE                                              0x00000080
-#define NV50_EVO_UNK84                                               0x00000084
-#define NV50_EVO_UNK84_NOTIFY                                        0x40000000
-#define NV50_EVO_UNK84_NOTIFY_DISABLED                               0x00000000
-#define NV50_EVO_UNK84_NOTIFY_ENABLED                                0x40000000
-#define NV50_EVO_DMA_NOTIFY                                          0x00000088
-#define NV50_EVO_DMA_NOTIFY_HANDLE                                   0xffffffff
-#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE                              0x00000000
-#define NV50_EVO_UNK8C                                               0x0000008C
-
-#define NV50_EVO_DAC(n, r)                       ((n) * 0x80 + NV50_EVO_DAC_##r)
-#define NV50_EVO_DAC_MODE_CTRL                                       0x00000400
-#define NV50_EVO_DAC_MODE_CTRL_CRTC0                                 0x00000001
-#define NV50_EVO_DAC_MODE_CTRL_CRTC1                                 0x00000002
-#define NV50_EVO_DAC_MODE_CTRL2                                      0x00000404
-#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC                               0x00000001
-#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC                               0x00000002
-
-#define NV50_EVO_SOR(n, r)                       ((n) * 0x40 + NV50_EVO_SOR_##r)
-#define NV50_EVO_SOR_MODE_CTRL                                       0x00000600
-#define NV50_EVO_SOR_MODE_CTRL_CRTC0                                 0x00000001
-#define NV50_EVO_SOR_MODE_CTRL_CRTC1                                 0x00000002
-#define NV50_EVO_SOR_MODE_CTRL_TMDS                                  0x00000100
-#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK                        0x00000400
-#define NV50_EVO_SOR_MODE_CTRL_NHSYNC                                0x00001000
-#define NV50_EVO_SOR_MODE_CTRL_NVSYNC                                0x00002000
-
-#define NV50_EVO_CRTC(n, r)                    ((n) * 0x400 + NV50_EVO_CRTC_##r)
-#define NV84_EVO_CRTC(n, r)                    ((n) * 0x400 + NV84_EVO_CRTC_##r)
-#define NV50_EVO_CRTC_UNK0800                                        0x00000800
-#define NV50_EVO_CRTC_CLOCK                                          0x00000804
-#define NV50_EVO_CRTC_INTERLACE                                      0x00000808
-#define NV50_EVO_CRTC_DISPLAY_START                                  0x00000810
-#define NV50_EVO_CRTC_DISPLAY_TOTAL                                  0x00000814
-#define NV50_EVO_CRTC_SYNC_DURATION                                  0x00000818
-#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END                        0x0000081c
-#define NV50_EVO_CRTC_UNK0820                                        0x00000820
-#define NV50_EVO_CRTC_UNK0824                                        0x00000824
-#define NV50_EVO_CRTC_UNK082C                                        0x0000082c
-#define NV50_EVO_CRTC_CLUT_MODE                                      0x00000840
-/* You can't have a palette in 8 bit mode (=OFF) */
-#define NV50_EVO_CRTC_CLUT_MODE_BLANK                                0x00000000
-#define NV50_EVO_CRTC_CLUT_MODE_OFF                                  0x80000000
-#define NV50_EVO_CRTC_CLUT_MODE_ON                                   0xC0000000
-#define NV50_EVO_CRTC_CLUT_OFFSET                                    0x00000844
-#define NV84_EVO_CRTC_CLUT_DMA                                       0x0000085C
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE                                0xffffffff
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE                           0x00000000
-#define NV50_EVO_CRTC_FB_OFFSET                                      0x00000860
-#define NV50_EVO_CRTC_FB_SIZE                                        0x00000868
-#define NV50_EVO_CRTC_FB_CONFIG                                      0x0000086c
-#define NV50_EVO_CRTC_FB_CONFIG_MODE                                 0x00100000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE                            0x00000000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH                           0x00100000
-#define NV50_EVO_CRTC_FB_DEPTH                                       0x00000870
-#define NV50_EVO_CRTC_FB_DEPTH_8                                     0x00001e00
-#define NV50_EVO_CRTC_FB_DEPTH_15                                    0x0000e900
-#define NV50_EVO_CRTC_FB_DEPTH_16                                    0x0000e800
-#define NV50_EVO_CRTC_FB_DEPTH_24                                    0x0000cf00
-#define NV50_EVO_CRTC_FB_DEPTH_30                                    0x0000d100
-#define NV50_EVO_CRTC_FB_DMA                                         0x00000874
-#define NV50_EVO_CRTC_FB_DMA_HANDLE                                  0xffffffff
-#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE                             0x00000000
-#define NV50_EVO_CRTC_CURSOR_CTRL                                    0x00000880
-#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE                               0x05000000
-#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW                               0x85000000
-#define NV50_EVO_CRTC_CURSOR_OFFSET                                  0x00000884
-#define NV84_EVO_CRTC_CURSOR_DMA                                     0x0000089c
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE                              0xffffffff
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE                         0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL                                    0x000008a0
-#define NV50_EVO_CRTC_DITHER_CTRL_OFF                                0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL_ON                                 0x00000011
-#define NV50_EVO_CRTC_SCALE_CTRL                                     0x000008a4
-#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE                            0x00000000
-#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE                              0x00000009
-#define NV50_EVO_CRTC_COLOR_CTRL                                     0x000008a8
-#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE                            0x000fff00
-#define NV50_EVO_CRTC_COLOR_CTRL_HUE                                 0xfff00000
-#define NV50_EVO_CRTC_FB_POS                                         0x000008c0
-#define NV50_EVO_CRTC_REAL_RES                                       0x000008c8
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET                            0x000008d4
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
-       ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
-/* Both of these are needed, otherwise nothing happens. */
-#define NV50_EVO_CRTC_SCALE_RES1                                     0x000008d8
-#define NV50_EVO_CRTC_SCALE_RES2                                     0x000008dc
-#define NV50_EVO_CRTC_UNK900                                         0x00000900
-#define NV50_EVO_CRTC_UNK904                                         0x00000904
-
-#endif
index e0763ea..c20f272 100644 (file)
@@ -110,8 +110,11 @@ nv50_fence_create(struct nouveau_drm *drm)
                             0, 0x0000, NULL, &priv->bo);
        if (!ret) {
                ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
-               if (!ret)
+               if (!ret) {
                        ret = nouveau_bo_map(priv->bo);
+                       if (ret)
+                               nouveau_bo_unpin(priv->bo);
+               }
                if (ret)
                        nouveau_bo_ref(NULL, &priv->bo);
        }
index c4a6503..8bd5d27 100644 (file)
@@ -546,7 +546,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_device *device = nouveau_dev(dev);
-       u32 crtc_mask = nv50_display_active_crtcs(dev);
+       u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
        struct nouveau_mem_exec_func exec = {
                .dev = dev,
                .precharge = mclk_precharge,
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
deleted file mode 100644 (file)
index b562b59..0000000
+++ /dev/null
@@ -1,530 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static u32
-nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
-       static const u8 nv50[] = { 16, 8, 0, 24 };
-       if (nv_device(drm->device)->chipset == 0xaf)
-               return nvaf[lane];
-       return nv50[lane];
-}
-
-static void
-nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
-}
-
-static void
-nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
-                     u8 lane, u8 swing, u8 preem)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
-       u32 mask = 0x000000ff << shift;
-       u8 *table, *entry, *config;
-
-       table = nouveau_dp_bios_data(dev, dcb, &entry);
-       if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
-               NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
-               return;
-       }
-
-       config = entry + table[4];
-       while (config[0] != swing || config[1] != preem) {
-               config += table[5];
-               if (config >= entry + table[4] + entry[4] * table[5])
-                       return;
-       }
-
-       nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
-       nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
-       nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
-}
-
-static void
-nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
-                    int link_nr, u32 link_bw, bool enhframe)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
-       u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000;
-       u8 *table, *entry, mask;
-       int i;
-
-       table = nouveau_dp_bios_data(dev, dcb, &entry);
-       if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
-               NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
-               return;
-       }
-
-       entry = ROMPTR(dev, entry[10]);
-       if (entry) {
-               while (link_bw < ROM16(entry[0]) * 10)
-                       entry += 4;
-
-               nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc);
-       }
-
-       dpctrl |= ((1 << link_nr) - 1) << 16;
-       if (enhframe)
-               dpctrl |= 0x00004000;
-
-       if (link_bw > 162000)
-               clksor |= 0x00040000;
-
-       nv_wr32(device, 0x614300 + (or * 0x800), clksor);
-       nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl);
-
-       mask = 0;
-       for (i = 0; i < link_nr; i++)
-               mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
-       nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
-}
-
-static void
-nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
-       u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800));
-       if (clksor & 0x000c0000)
-               *bw = 270000;
-       else
-               *bw = 162000;
-
-       if      (dpctrl > 0x00030000) *nr = 4;
-       else if (dpctrl > 0x00010000) *nr = 2;
-       else                          *nr = 1;
-}
-
-void
-nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       const u32 symbol = 100000;
-       int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
-       int TU, VTUi, VTUf, VTUa;
-       u64 link_data_rate, link_ratio, unk;
-       u32 best_diff = 64 * symbol;
-       u32 link_nr, link_bw, r;
-
-       /* calculate packed data rate for each lane */
-       nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw);
-       link_data_rate = (clk * bpp / 8) / link_nr;
-
-       /* calculate ratio of packed data rate to link symbol rate */
-       link_ratio = link_data_rate * symbol;
-       r = do_div(link_ratio, link_bw);
-
-       for (TU = 64; TU >= 32; TU--) {
-               /* calculate average number of valid symbols in each TU */
-               u32 tu_valid = link_ratio * TU;
-               u32 calc, diff;
-
-               /* find a hw representation for the fraction.. */
-               VTUi = tu_valid / symbol;
-               calc = VTUi * symbol;
-               diff = tu_valid - calc;
-               if (diff) {
-                       if (diff >= (symbol / 2)) {
-                               VTUf = symbol / (symbol - diff);
-                               if (symbol - (VTUf * diff))
-                                       VTUf++;
-
-                               if (VTUf <= 15) {
-                                       VTUa  = 1;
-                                       calc += symbol - (symbol / VTUf);
-                               } else {
-                                       VTUa  = 0;
-                                       VTUf  = 1;
-                                       calc += symbol;
-                               }
-                       } else {
-                               VTUa  = 0;
-                               VTUf  = min((int)(symbol / diff), 15);
-                               calc += symbol / VTUf;
-                       }
-
-                       diff = calc - tu_valid;
-               } else {
-                       /* no remainder, but the hw doesn't like the fractional
-                        * part to be zero.  decrement the integer part and
-                        * have the fraction add a whole symbol back
-                        */
-                       VTUa = 0;
-                       VTUf = 1;
-                       VTUi--;
-               }
-
-               if (diff < best_diff) {
-                       best_diff = diff;
-                       bestTU = TU;
-                       bestVTUa = VTUa;
-                       bestVTUf = VTUf;
-                       bestVTUi = VTUi;
-                       if (diff == 0)
-                               break;
-               }
-       }
-
-       if (!bestTU) {
-               NV_ERROR(drm, "DP: unable to find suitable config\n");
-               return;
-       }
-
-       /* XXX close to vbios numbers, but not right */
-       unk  = (symbol - link_ratio) * bestTU;
-       unk *= link_ratio;
-       r = do_div(unk, symbol);
-       r = do_div(unk, symbol);
-       unk += 6;
-
-       nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
-       nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
-                                                            bestVTUf << 16 |
-                                                            bestVTUi << 8 |
-                                                            unk);
-}
-static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int ret;
-
-       if (!nv_encoder->crtc)
-               return;
-       nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
-       NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or);
-
-       ret = RING_SPACE(evo, 4);
-       if (ret) {
-               NV_ERROR(drm, "no space while disconnecting SOR\n");
-               return;
-       }
-       BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
-       OUT_RING  (evo, 0);
-       BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-       OUT_RING  (evo, 0);
-
-       nouveau_hdmi_mode_set(encoder, NULL);
-
-       nv_encoder->crtc = NULL;
-       nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-}
-
-static void
-nv50_sor_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_encoder *enc;
-       uint32_t val;
-       int or = nv_encoder->or;
-
-       NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
-
-       nv_encoder->last_dpms = mode;
-       list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
-               struct nouveau_encoder *nvenc = nouveau_encoder(enc);
-
-               if (nvenc == nv_encoder ||
-                   (nvenc->dcb->type != DCB_OUTPUT_TMDS &&
-                    nvenc->dcb->type != DCB_OUTPUT_LVDS &&
-                    nvenc->dcb->type != DCB_OUTPUT_DP) ||
-                   nvenc->dcb->or != nv_encoder->dcb->or)
-                       continue;
-
-               if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
-                       return;
-       }
-
-       /* wait for it to be done */
-       if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
-                    NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
-               NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
-               NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
-                        nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
-       }
-
-       val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
-
-       if (mode == DRM_MODE_DPMS_ON)
-               val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
-       else
-               val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
-
-       nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
-               NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
-       if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or),
-                    NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
-               NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
-               NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
-                        nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
-       }
-
-       if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-               struct dp_train_func func = {
-                       .link_set = nv50_sor_dp_link_set,
-                       .train_set = nv50_sor_dp_train_set,
-                       .train_adj = nv50_sor_dp_train_adj
-               };
-
-               nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
-       }
-}
-
-static void
-nv50_sor_save(struct drm_encoder *encoder)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_sor_restore(struct drm_encoder *encoder)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_sor_mode_fixup(struct drm_encoder *encoder,
-                   const struct drm_display_mode *mode,
-                   struct drm_display_mode *adjusted_mode)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_connector *connector;
-
-       NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
-       connector = nouveau_encoder_connector_get(nv_encoder);
-       if (!connector) {
-               NV_ERROR(drm, "Encoder has no connector\n");
-               return false;
-       }
-
-       if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
-            connector->native_mode)
-               drm_mode_copy(adjusted_mode, connector->native_mode);
-
-       return true;
-}
-
-static void
-nv50_sor_prepare(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       nv50_sor_disconnect(encoder);
-       if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-               /* avoid race between link training and supervisor intr */
-               nv50_display_sync(encoder->dev);
-       }
-}
-
-static void
-nv50_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
-                 struct drm_display_mode *mode)
-{
-       struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
-       struct nouveau_connector *nv_connector;
-       uint32_t mode_ctl = 0;
-       int ret;
-
-       NV_DEBUG(drm, "or %d type %d -> crtc %d\n",
-                    nv_encoder->or, nv_encoder->dcb->type, crtc->index);
-       nv_encoder->crtc = encoder->crtc;
-
-       switch (nv_encoder->dcb->type) {
-       case DCB_OUTPUT_TMDS:
-               if (nv_encoder->dcb->sorconf.link & 1) {
-                       if (mode->clock < 165000)
-                               mode_ctl = 0x0100;
-                       else
-                               mode_ctl = 0x0500;
-               } else
-                       mode_ctl = 0x0200;
-
-               nouveau_hdmi_mode_set(encoder, mode);
-               break;
-       case DCB_OUTPUT_DP:
-               nv_connector = nouveau_encoder_connector_get(nv_encoder);
-               if (nv_connector && nv_connector->base.display_info.bpc == 6) {
-                       nv_encoder->dp.datarate = mode->clock * 18 / 8;
-                       mode_ctl |= 0x00020000;
-               } else {
-                       nv_encoder->dp.datarate = mode->clock * 24 / 8;
-                       mode_ctl |= 0x00050000;
-               }
-
-               if (nv_encoder->dcb->sorconf.link & 1)
-                       mode_ctl |= 0x00000800;
-               else
-                       mode_ctl |= 0x00000900;
-               break;
-       default:
-               break;
-       }
-
-       if (crtc->index == 1)
-               mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
-       else
-               mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
-
-       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-               mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
-
-       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-               mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
-
-       nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
-       ret = RING_SPACE(evo, 2);
-       if (ret) {
-               NV_ERROR(drm, "no space while connecting SOR\n");
-               nv_encoder->crtc = NULL;
-               return;
-       }
-       BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
-       OUT_RING(evo, mode_ctl);
-}
-
-static struct drm_crtc *
-nv50_sor_crtc_get(struct drm_encoder *encoder)
-{
-       return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
-       .dpms = nv50_sor_dpms,
-       .save = nv50_sor_save,
-       .restore = nv50_sor_restore,
-       .mode_fixup = nv50_sor_mode_fixup,
-       .prepare = nv50_sor_prepare,
-       .commit = nv50_sor_commit,
-       .mode_set = nv50_sor_mode_set,
-       .get_crtc = nv50_sor_crtc_get,
-       .detect = NULL,
-       .disable = nv50_sor_disconnect
-};
-
-static void
-nv50_sor_destroy(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
-       NV_DEBUG(drm, "\n");
-
-       drm_encoder_cleanup(encoder);
-
-       kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
-       .destroy = nv50_sor_destroy,
-};
-
-int
-nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry)
-{
-       struct nouveau_encoder *nv_encoder = NULL;
-       struct drm_device *dev = connector->dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct drm_encoder *encoder;
-       int type;
-
-       NV_DEBUG(drm, "\n");
-
-       switch (entry->type) {
-       case DCB_OUTPUT_TMDS:
-       case DCB_OUTPUT_DP:
-               type = DRM_MODE_ENCODER_TMDS;
-               break;
-       case DCB_OUTPUT_LVDS:
-               type = DRM_MODE_ENCODER_LVDS;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
-       if (!nv_encoder)
-               return -ENOMEM;
-       encoder = to_drm_encoder(nv_encoder);
-
-       nv_encoder->dcb = entry;
-       nv_encoder->or = ffs(entry->or) - 1;
-       nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
-       drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
-       drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
-
-       encoder->possible_crtcs = entry->heads;
-       encoder->possible_clones = 0;
-
-       drm_mode_connector_attach_encoder(connector, encoder);
-       return 0;
-}
index 53299ea..2a56b1b 100644 (file)
@@ -114,17 +114,9 @@ nvc0_fence_context_del(struct nouveau_channel *chan)
        struct nvc0_fence_chan *fctx = chan->fence;
        int i;
 
-       if (nv_device(chan->drm->device)->card_type >= NV_D0) {
-               for (i = 0; i < dev->mode_config.num_crtc; i++) {
-                       struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
-                       nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
-               }
-       } else
-       if (nv_device(chan->drm->device)->card_type >= NV_50) {
-               for (i = 0; i < dev->mode_config.num_crtc; i++) {
-                       struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
-                       nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
-               }
+       for (i = 0; i < dev->mode_config.num_crtc; i++) {
+               struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+               nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
        }
 
        nouveau_bo_vma_del(priv->bo, &fctx->vma);
@@ -154,12 +146,7 @@ nvc0_fence_context_new(struct nouveau_channel *chan)
 
        /* map display semaphore buffers into channel's vm */
        for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
-               struct nouveau_bo *bo;
-               if (nv_device(chan->drm->device)->card_type >= NV_D0)
-                       bo = nvd0_display_crtc_sema(chan->drm->dev, i);
-               else
-                       bo = nv50_display_crtc_sema(chan->drm->dev, i);
-
+               struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
                ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
        }
 
@@ -203,6 +190,8 @@ nvc0_fence_destroy(struct nouveau_drm *drm)
 {
        struct nvc0_fence_priv *priv = drm->fence;
        nouveau_bo_unmap(priv->bo);
+       if (priv->bo)
+               nouveau_bo_unpin(priv->bo);
        nouveau_bo_ref(NULL, &priv->bo);
        drm->fence = NULL;
        kfree(priv);
@@ -232,8 +221,11 @@ nvc0_fence_create(struct nouveau_drm *drm)
                             TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
        if (ret == 0) {
                ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
-               if (ret == 0)
+               if (ret == 0) {
                        ret = nouveau_bo_map(priv->bo);
+                       if (ret)
+                               nouveau_bo_unpin(priv->bo);
+               }
                if (ret)
                        nouveau_bo_ref(NULL, &priv->bo);
        }
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
deleted file mode 100644 (file)
index c402fca..0000000
+++ /dev/null
@@ -1,2141 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_fence.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/bar.h>
-#include <subdev/fb.h>
-
-#define EVO_DMA_NR 9
-
-#define EVO_MASTER  (0x00)
-#define EVO_FLIP(c) (0x01 + (c))
-#define EVO_OVLY(c) (0x05 + (c))
-#define EVO_OIMM(c) (0x09 + (c))
-#define EVO_CURS(c) (0x0d + (c))
-
-/* offsets in shared sync bo of various structures */
-#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY     EVO_SYNC(  0, 0x00)
-#define EVO_FLIP_SEM0(c)  EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c)  EVO_SYNC((c), 0x10)
-
-struct evo {
-       int idx;
-       dma_addr_t handle;
-       u32 *ptr;
-       struct {
-               u32 offset;
-               u16 value;
-       } sem;
-};
-
-struct nvd0_display {
-       struct nouveau_gpuobj *mem;
-       struct nouveau_bo *sync;
-       struct evo evo[9];
-
-       struct tasklet_struct tasklet;
-       u32 modeset;
-};
-
-static struct nvd0_display *
-nvd0_display(struct drm_device *dev)
-{
-       return nouveau_display(dev)->priv;
-}
-
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
-{
-       return nouveau_encoder(encoder)->crtc;
-}
-
-/******************************************************************************
- * EVO channel helpers
- *****************************************************************************/
-static inline int
-evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       int ret = 0;
-       nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
-       nv_wr32(device, 0x610704 + (id * 0x10), data);
-       nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
-       if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
-               ret = -EBUSY;
-       nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
-       return ret;
-}
-
-static u32 *
-evo_wait(struct drm_device *dev, int id, int nr)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvd0_display *disp = nvd0_display(dev);
-       u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4;
-
-       if (put + nr >= (PAGE_SIZE / 4)) {
-               disp->evo[id].ptr[put] = 0x20000000;
-
-               nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000);
-               if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
-                       NV_ERROR(drm, "evo %d dma stalled\n", id);
-                       return NULL;
-               }
-
-               put = 0;
-       }
-
-       return disp->evo[id].ptr + put;
-}
-
-static void
-evo_kick(u32 *push, struct drm_device *dev, int id)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nvd0_display *disp = nvd0_display(dev);
-
-       nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
-}
-
-#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
-#define evo_data(p,d)   *((p)++) = (d)
-
-static int
-evo_init_dma(struct drm_device *dev, int ch)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvd0_display *disp = nvd0_display(dev);
-       u32 flags;
-
-       flags = 0x00000000;
-       if (ch == EVO_MASTER)
-               flags |= 0x01000000;
-
-       nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
-       nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000);
-       nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001);
-       nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
-       nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000);
-       nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
-       if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
-               NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
-                             nv_rd32(device, 0x610490 + (ch * 0x0010)));
-               return -EBUSY;
-       }
-
-       nv_mask(device, 0x610090, (1 << ch), (1 << ch));
-       nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
-       return 0;
-}
-
-static void
-evo_fini_dma(struct drm_device *dev, int ch)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-
-       if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010))
-               return;
-
-       nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
-       nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
-       nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
-       nv_mask(device, 0x610090, (1 << ch), 0x00000000);
-       nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static inline void
-evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data);
-}
-
-static int
-evo_init_pio(struct drm_device *dev, int ch)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-
-       nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001);
-       if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
-               NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
-                             nv_rd32(device, 0x610490 + (ch * 0x0010)));
-               return -EBUSY;
-       }
-
-       nv_mask(device, 0x610090, (1 << ch), (1 << ch));
-       nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
-       return 0;
-}
-
-static void
-evo_fini_pio(struct drm_device *dev, int ch)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-
-       if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001))
-               return;
-
-       nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
-       nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
-       nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
-       nv_mask(device, 0x610090, (1 << ch), 0x00000000);
-       nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static bool
-evo_sync_wait(void *data)
-{
-       return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
-}
-
-static int
-evo_sync(struct drm_device *dev, int ch)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nvd0_display *disp = nvd0_display(dev);
-       u32 *push = evo_wait(dev, ch, 8);
-       if (push) {
-               nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
-               evo_mthd(push, 0x0084, 1);
-               evo_data(push, 0x80000000 | EVO_MAST_NTFY);
-               evo_mthd(push, 0x0080, 2);
-               evo_data(push, 0x00000000);
-               evo_data(push, 0x00000000);
-               evo_kick(push, dev, ch);
-               if (nv_wait_cb(device, evo_sync_wait, disp->sync))
-                       return 0;
-       }
-
-       return -EBUSY;
-}
-
-/******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
-{
-       return nvd0_display(dev)->sync;
-}
-
-void
-nvd0_display_flip_stop(struct drm_crtc *crtc)
-{
-       struct nvd0_display *disp = nvd0_display(crtc->dev);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
-       u32 *push;
-
-       push = evo_wait(crtc->dev, evo->idx, 8);
-       if (push) {
-               evo_mthd(push, 0x0084, 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x0094, 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x00c0, 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x0080, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, crtc->dev, evo->idx);
-       }
-}
-
-int
-nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-                      struct nouveau_channel *chan, u32 swap_interval)
-{
-       struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
-       struct nvd0_display *disp = nvd0_display(crtc->dev);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
-       u64 offset;
-       u32 *push;
-       int ret;
-
-       swap_interval <<= 4;
-       if (swap_interval == 0)
-               swap_interval |= 0x100;
-
-       push = evo_wait(crtc->dev, evo->idx, 128);
-       if (unlikely(push == NULL))
-               return -EBUSY;
-
-       /* synchronise with the rendering channel, if necessary */
-       if (likely(chan)) {
-               ret = RING_SPACE(chan, 10);
-               if (ret)
-                       return ret;
-
-
-               offset  = nvc0_fence_crtc(chan, nv_crtc->index);
-               offset += evo->sem.offset;
-
-               BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-               OUT_RING  (chan, upper_32_bits(offset));
-               OUT_RING  (chan, lower_32_bits(offset));
-               OUT_RING  (chan, 0xf00d0000 | evo->sem.value);
-               OUT_RING  (chan, 0x1002);
-               BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-               OUT_RING  (chan, upper_32_bits(offset));
-               OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
-               OUT_RING  (chan, 0x74b1e000);
-               OUT_RING  (chan, 0x1001);
-               FIRE_RING (chan);
-       } else {
-               nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
-                               0xf00d0000 | evo->sem.value);
-               evo_sync(crtc->dev, EVO_MASTER);
-       }
-
-       /* queue the flip */
-       evo_mthd(push, 0x0100, 1);
-       evo_data(push, 0xfffe0000);
-       evo_mthd(push, 0x0084, 1);
-       evo_data(push, swap_interval);
-       if (!(swap_interval & 0x00000100)) {
-               evo_mthd(push, 0x00e0, 1);
-               evo_data(push, 0x40000000);
-       }
-       evo_mthd(push, 0x0088, 4);
-       evo_data(push, evo->sem.offset);
-       evo_data(push, 0xf00d0000 | evo->sem.value);
-       evo_data(push, 0x74b1e000);
-       evo_data(push, NvEvoSync);
-       evo_mthd(push, 0x00a0, 2);
-       evo_data(push, 0x00000000);
-       evo_data(push, 0x00000000);
-       evo_mthd(push, 0x00c0, 1);
-       evo_data(push, nv_fb->r_dma);
-       evo_mthd(push, 0x0110, 2);
-       evo_data(push, 0x00000000);
-       evo_data(push, 0x00000000);
-       evo_mthd(push, 0x0400, 5);
-       evo_data(push, nv_fb->nvbo->bo.offset >> 8);
-       evo_data(push, 0);
-       evo_data(push, (fb->height << 16) | fb->width);
-       evo_data(push, nv_fb->r_pitch);
-       evo_data(push, nv_fb->r_format);
-       evo_mthd(push, 0x0080, 1);
-       evo_data(push, 0x00000000);
-       evo_kick(push, crtc->dev, evo->idx);
-
-       evo->sem.offset ^= 0x10;
-       evo->sem.value++;
-       return 0;
-}
-
-/******************************************************************************
- * CRTC
- *****************************************************************************/
-static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev);
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_connector *nv_connector;
-       struct drm_connector *connector;
-       u32 *push, mode = 0x00;
-       u32 mthd;
-
-       nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       connector = &nv_connector->base;
-       if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
-               if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
-                       mode = DITHERING_MODE_DYNAMIC2X2;
-       } else {
-               mode = nv_connector->dithering_mode;
-       }
-
-       if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
-               if (connector->display_info.bpc >= 8)
-                       mode |= DITHERING_DEPTH_8BPC;
-       } else {
-               mode |= nv_connector->dithering_depth;
-       }
-
-       if (nv_device(drm->device)->card_type < NV_E0)
-               mthd = 0x0490 + (nv_crtc->index * 0x0300);
-       else
-               mthd = 0x04a0 + (nv_crtc->index * 0x0300);
-
-       push = evo_wait(dev, EVO_MASTER, 4);
-       if (push) {
-               evo_mthd(push, mthd, 1);
-               evo_data(push, mode);
-               if (update) {
-                       evo_mthd(push, 0x0080, 1);
-                       evo_data(push, 0x00000000);
-               }
-               evo_kick(push, dev, EVO_MASTER);
-       }
-
-       return 0;
-}
-
-static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct drm_crtc *crtc = &nv_crtc->base;
-       struct nouveau_connector *nv_connector;
-       int mode = DRM_MODE_SCALE_NONE;
-       u32 oX, oY, *push;
-
-       /* start off at the resolution we programmed the crtc for, this
-        * effectively handles NONE/FULL scaling
-        */
-       nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       if (nv_connector && nv_connector->native_mode)
-               mode = nv_connector->scaling_mode;
-
-       if (mode != DRM_MODE_SCALE_NONE)
-               omode = nv_connector->native_mode;
-       else
-               omode = umode;
-
-       oX = omode->hdisplay;
-       oY = omode->vdisplay;
-       if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
-               oY *= 2;
-
-       /* add overscan compensation if necessary, will keep the aspect
-        * ratio the same as the backend mode unless overridden by the
-        * user setting both hborder and vborder properties.
-        */
-       if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
-                            (nv_connector->underscan == UNDERSCAN_AUTO &&
-                             nv_connector->edid &&
-                             drm_detect_hdmi_monitor(nv_connector->edid)))) {
-               u32 bX = nv_connector->underscan_hborder;
-               u32 bY = nv_connector->underscan_vborder;
-               u32 aspect = (oY << 19) / oX;
-
-               if (bX) {
-                       oX -= (bX * 2);
-                       if (bY) oY -= (bY * 2);
-                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
-               } else {
-                       oX -= (oX >> 4) + 32;
-                       if (bY) oY -= (bY * 2);
-                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
-               }
-       }
-
-       /* handle CENTER/ASPECT scaling, taking into account the areas
-        * removed already for overscan compensation
-        */
-       switch (mode) {
-       case DRM_MODE_SCALE_CENTER:
-               oX = min((u32)umode->hdisplay, oX);
-               oY = min((u32)umode->vdisplay, oY);
-               /* fall-through */
-       case DRM_MODE_SCALE_ASPECT:
-               if (oY < oX) {
-                       u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
-                       oX = ((oY * aspect) + (aspect / 2)) >> 19;
-               } else {
-                       u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
-                       oY = ((oX * aspect) + (aspect / 2)) >> 19;
-               }
-               break;
-       default:
-               break;
-       }
-
-       push = evo_wait(dev, EVO_MASTER, 8);
-       if (push) {
-               evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
-               evo_data(push, (oY << 16) | oX);
-               evo_data(push, (oY << 16) | oX);
-               evo_data(push, (oY << 16) | oX);
-               evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
-               evo_kick(push, dev, EVO_MASTER);
-               if (update) {
-                       nvd0_display_flip_stop(crtc);
-                       nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-               }
-       }
-
-       return 0;
-}
-
-static int
-nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
-                   int x, int y, bool update)
-{
-       struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
-       u32 *push;
-
-       push = evo_wait(fb->dev, EVO_MASTER, 16);
-       if (push) {
-               evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, nvfb->nvbo->bo.offset >> 8);
-               evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
-               evo_data(push, (fb->height << 16) | fb->width);
-               evo_data(push, nvfb->r_pitch);
-               evo_data(push, nvfb->r_format);
-               evo_data(push, nvfb->r_dma);
-               evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, (y << 16) | x);
-               if (update) {
-                       evo_mthd(push, 0x0080, 1);
-                       evo_data(push, 0x00000000);
-               }
-               evo_kick(push, fb->dev, EVO_MASTER);
-       }
-
-       nv_crtc->fb.tile_flags = nvfb->r_dma;
-       return 0;
-}
-
-static void
-nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
-{
-       struct drm_device *dev = nv_crtc->base.dev;
-       u32 *push = evo_wait(dev, EVO_MASTER, 16);
-       if (push) {
-               if (show) {
-                       evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
-                       evo_data(push, 0x85000000);
-                       evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
-                       evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
-                       evo_data(push, NvEvoVRAM);
-               } else {
-                       evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
-                       evo_data(push, 0x05000000);
-                       evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
-                       evo_data(push, 0x00000000);
-               }
-
-               if (update) {
-                       evo_mthd(push, 0x0080, 1);
-                       evo_data(push, 0x00000000);
-               }
-
-               evo_kick(push, dev, EVO_MASTER);
-       }
-}
-
-static void
-nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nvd0_crtc_prepare(struct drm_crtc *crtc)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       u32 *push;
-
-       nvd0_display_flip_stop(crtc);
-
-       push = evo_wait(crtc->dev, EVO_MASTER, 2);
-       if (push) {
-               evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, 0x03000000);
-               evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, crtc->dev, EVO_MASTER);
-       }
-
-       nvd0_crtc_cursor_show(nv_crtc, false, false);
-}
-
-static void
-nvd0_crtc_commit(struct drm_crtc *crtc)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       u32 *push;
-
-       push = evo_wait(crtc->dev, EVO_MASTER, 32);
-       if (push) {
-               evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, nv_crtc->fb.tile_flags);
-               evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
-               evo_data(push, 0x83000000);
-               evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
-               evo_data(push, 0x00000000);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
-               evo_data(push, NvEvoVRAM);
-               evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, 0xffffff00);
-               evo_kick(push, crtc->dev, EVO_MASTER);
-       }
-
-       nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
-       nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-}
-
-static bool
-nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
-                    struct drm_display_mode *adjusted_mode)
-{
-       return true;
-}
-
-static int
-nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
-{
-       struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
-       int ret;
-
-       ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
-       if (ret)
-               return ret;
-
-       if (old_fb) {
-               nvfb = nouveau_framebuffer(old_fb);
-               nouveau_bo_unpin(nvfb->nvbo);
-       }
-
-       return 0;
-}
-
-static int
-nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
-                  struct drm_display_mode *mode, int x, int y,
-                  struct drm_framebuffer *old_fb)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct nouveau_connector *nv_connector;
-       u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
-       u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
-       u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
-       u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
-       u32 vblan2e = 0, vblan2s = 1;
-       u32 *push;
-       int ret;
-
-       hactive = mode->htotal;
-       hsynce  = mode->hsync_end - mode->hsync_start - 1;
-       hbackp  = mode->htotal - mode->hsync_end;
-       hblanke = hsynce + hbackp;
-       hfrontp = mode->hsync_start - mode->hdisplay;
-       hblanks = mode->htotal - hfrontp - 1;
-
-       vactive = mode->vtotal * vscan / ilace;
-       vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
-       vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
-       vblanke = vsynce + vbackp;
-       vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
-       vblanks = vactive - vfrontp - 1;
-       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               vblan2e = vactive + vsynce + vbackp;
-               vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
-               vactive = (vactive * 2) + 1;
-       }
-
-       ret = nvd0_crtc_swap_fbs(crtc, old_fb);
-       if (ret)
-               return ret;
-
-       push = evo_wait(crtc->dev, EVO_MASTER, 64);
-       if (push) {
-               evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
-               evo_data(push, 0x00000000);
-               evo_data(push, (vactive << 16) | hactive);
-               evo_data(push, ( vsynce << 16) | hsynce);
-               evo_data(push, (vblanke << 16) | hblanke);
-               evo_data(push, (vblanks << 16) | hblanks);
-               evo_data(push, (vblan2e << 16) | vblan2s);
-               evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
-               evo_data(push, 0x00000000); /* ??? */
-               evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
-               evo_data(push, mode->clock * 1000);
-               evo_data(push, 0x00200000); /* ??? */
-               evo_data(push, mode->clock * 1000);
-               evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
-               evo_data(push, 0x00000311);
-               evo_data(push, 0x00000100);
-               evo_kick(push, crtc->dev, EVO_MASTER);
-       }
-
-       nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       nvd0_crtc_set_dither(nv_crtc, false);
-       nvd0_crtc_set_scale(nv_crtc, false);
-       nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
-       return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-                       struct drm_framebuffer *old_fb)
-{
-       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       int ret;
-
-       if (!crtc->fb) {
-               NV_DEBUG(drm, "No FB bound\n");
-               return 0;
-       }
-
-       ret = nvd0_crtc_swap_fbs(crtc, old_fb);
-       if (ret)
-               return ret;
-
-       nvd0_display_flip_stop(crtc);
-       nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
-       nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-       return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb, int x, int y,
-                              enum mode_set_atomic state)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       nvd0_display_flip_stop(crtc);
-       nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
-       return 0;
-}
-
-static void
-nvd0_crtc_lut_load(struct drm_crtc *crtc)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
-       int i;
-
-       for (i = 0; i < 256; i++) {
-               writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
-               writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
-               writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
-       }
-}
-
-static int
-nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
-                    uint32_t handle, uint32_t width, uint32_t height)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-       struct drm_gem_object *gem;
-       struct nouveau_bo *nvbo;
-       bool visible = (handle != 0);
-       int i, ret = 0;
-
-       if (visible) {
-               if (width != 64 || height != 64)
-                       return -EINVAL;
-
-               gem = drm_gem_object_lookup(dev, file_priv, handle);
-               if (unlikely(!gem))
-                       return -ENOENT;
-               nvbo = nouveau_gem_object(gem);
-
-               ret = nouveau_bo_map(nvbo);
-               if (ret == 0) {
-                       for (i = 0; i < 64 * 64; i++) {
-                               u32 v = nouveau_bo_rd32(nvbo, i);
-                               nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
-                       }
-                       nouveau_bo_unmap(nvbo);
-               }
-
-               drm_gem_object_unreference_unlocked(gem);
-       }
-
-       if (visible != nv_crtc->cursor.visible) {
-               nvd0_crtc_cursor_show(nv_crtc, visible, true);
-               nv_crtc->cursor.visible = visible;
-       }
-
-       return ret;
-}
-
-static int
-nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       int ch = EVO_CURS(nv_crtc->index);
-
-       evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
-       evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
-       return 0;
-}
-
-static void
-nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-                   uint32_t start, uint32_t size)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       u32 end = max(start + size, (u32)256);
-       u32 i;
-
-       for (i = start; i < end; i++) {
-               nv_crtc->lut.r[i] = r[i];
-               nv_crtc->lut.g[i] = g[i];
-               nv_crtc->lut.b[i] = b[i];
-       }
-
-       nvd0_crtc_lut_load(crtc);
-}
-
-static void
-nvd0_crtc_destroy(struct drm_crtc *crtc)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       nouveau_bo_unmap(nv_crtc->cursor.nvbo);
-       nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-       nouveau_bo_unmap(nv_crtc->lut.nvbo);
-       nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
-       drm_crtc_cleanup(crtc);
-       kfree(crtc);
-}
-
-static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
-       .dpms = nvd0_crtc_dpms,
-       .prepare = nvd0_crtc_prepare,
-       .commit = nvd0_crtc_commit,
-       .mode_fixup = nvd0_crtc_mode_fixup,
-       .mode_set = nvd0_crtc_mode_set,
-       .mode_set_base = nvd0_crtc_mode_set_base,
-       .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
-       .load_lut = nvd0_crtc_lut_load,
-};
-
-static const struct drm_crtc_funcs nvd0_crtc_func = {
-       .cursor_set = nvd0_crtc_cursor_set,
-       .cursor_move = nvd0_crtc_cursor_move,
-       .gamma_set = nvd0_crtc_gamma_set,
-       .set_config = drm_crtc_helper_set_config,
-       .destroy = nvd0_crtc_destroy,
-       .page_flip = nouveau_crtc_page_flip,
-};
-
-static void
-nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-}
-
-static void
-nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-}
-
-static int
-nvd0_crtc_create(struct drm_device *dev, int index)
-{
-       struct nouveau_crtc *nv_crtc;
-       struct drm_crtc *crtc;
-       int ret, i;
-
-       nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
-       if (!nv_crtc)
-               return -ENOMEM;
-
-       nv_crtc->index = index;
-       nv_crtc->set_dither = nvd0_crtc_set_dither;
-       nv_crtc->set_scale = nvd0_crtc_set_scale;
-       nv_crtc->cursor.set_offset = nvd0_cursor_set_offset;
-       nv_crtc->cursor.set_pos = nvd0_cursor_set_pos;
-       for (i = 0; i < 256; i++) {
-               nv_crtc->lut.r[i] = i << 8;
-               nv_crtc->lut.g[i] = i << 8;
-               nv_crtc->lut.b[i] = i << 8;
-       }
-
-       crtc = &nv_crtc->base;
-       drm_crtc_init(dev, crtc, &nvd0_crtc_func);
-       drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
-       drm_mode_crtc_set_gamma_size(crtc, 256);
-
-       ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
-                            0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
-       if (!ret) {
-               ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
-               if (!ret)
-                       ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
-               if (ret)
-                       nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-       }
-
-       if (ret)
-               goto out;
-
-       ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
-                            0, 0x0000, NULL, &nv_crtc->lut.nvbo);
-       if (!ret) {
-               ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
-               if (!ret)
-                       ret = nouveau_bo_map(nv_crtc->lut.nvbo);
-               if (ret)
-                       nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
-       }
-
-       if (ret)
-               goto out;
-
-       nvd0_crtc_lut_load(crtc);
-
-out:
-       if (ret)
-               nvd0_crtc_destroy(crtc);
-       return ret;
-}
-
-/******************************************************************************
- * DAC
- *****************************************************************************/
-static void
-nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       int or = nv_encoder->or;
-       u32 dpms_ctrl;
-
-       dpms_ctrl = 0x80000000;
-       if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
-               dpms_ctrl |= 0x00000001;
-       if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
-               dpms_ctrl |= 0x00000004;
-
-       nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-       nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
-       nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-}
-
-static bool
-nvd0_dac_mode_fixup(struct drm_encoder *encoder,
-                   const struct drm_display_mode *mode,
-                   struct drm_display_mode *adjusted_mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_connector *nv_connector;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       if (nv_connector && nv_connector->native_mode) {
-               if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
-                       int id = adjusted_mode->base.id;
-                       *adjusted_mode = *nv_connector->native_mode;
-                       adjusted_mode->base.id = id;
-               }
-       }
-
-       return true;
-}
-
-static void
-nvd0_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
-                 struct drm_display_mode *adjusted_mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-       u32 syncs, magic, *push;
-
-       syncs = 0x00000001;
-       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-               syncs |= 0x00000008;
-       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-               syncs |= 0x00000010;
-
-       magic = 0x31ec6000 | (nv_crtc->index << 25);
-       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-               magic |= 0x00000001;
-
-       nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
-       push = evo_wait(encoder->dev, EVO_MASTER, 8);
-       if (push) {
-               evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
-               evo_data(push, syncs);
-               evo_data(push, magic);
-               evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2);
-               evo_data(push, 1 << nv_crtc->index);
-               evo_data(push, 0x00ff);
-               evo_kick(push, encoder->dev, EVO_MASTER);
-       }
-
-       nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_dac_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       u32 *push;
-
-       if (nv_encoder->crtc) {
-               nvd0_crtc_prepare(nv_encoder->crtc);
-
-               push = evo_wait(dev, EVO_MASTER, 4);
-               if (push) {
-                       evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
-                       evo_data(push, 0x00000000);
-                       evo_mthd(push, 0x0080, 1);
-                       evo_data(push, 0x00000000);
-                       evo_kick(push, dev, EVO_MASTER);
-               }
-
-               nv_encoder->crtc = NULL;
-       }
-}
-
-static enum drm_connector_status
-nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
-       enum drm_connector_status status = connector_status_disconnected;
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       int or = nv_encoder->or;
-       u32 load;
-
-       nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000);
-       udelay(9500);
-       nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000);
-
-       load = nv_rd32(device, 0x61a00c + (or * 0x800));
-       if ((load & 0x38000000) == 0x38000000)
-               status = connector_status_connected;
-
-       nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000);
-       return status;
-}
-
-static void
-nvd0_dac_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-       kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
-       .dpms = nvd0_dac_dpms,
-       .mode_fixup = nvd0_dac_mode_fixup,
-       .prepare = nvd0_dac_disconnect,
-       .commit = nvd0_dac_commit,
-       .mode_set = nvd0_dac_mode_set,
-       .disable = nvd0_dac_disconnect,
-       .get_crtc = nvd0_display_crtc_get,
-       .detect = nvd0_dac_detect
-};
-
-static const struct drm_encoder_funcs nvd0_dac_func = {
-       .destroy = nvd0_dac_destroy,
-};
-
-static int
-nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
-       struct drm_device *dev = connector->dev;
-       struct nouveau_encoder *nv_encoder;
-       struct drm_encoder *encoder;
-
-       nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
-       if (!nv_encoder)
-               return -ENOMEM;
-       nv_encoder->dcb = dcbe;
-       nv_encoder->or = ffs(dcbe->or) - 1;
-
-       encoder = to_drm_encoder(nv_encoder);
-       encoder->possible_crtcs = dcbe->heads;
-       encoder->possible_clones = 0;
-       drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
-       drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);
-
-       drm_mode_connector_attach_encoder(connector, encoder);
-       return 0;
-}
-
-/******************************************************************************
- * Audio
- *****************************************************************************/
-static void
-nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_connector *nv_connector;
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       int i, or = nv_encoder->or * 0x30;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       if (!drm_detect_monitor_audio(nv_connector->edid))
-               return;
-
-       nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001);
-
-       drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
-       if (nv_connector->base.eld[0]) {
-               u8 *eld = nv_connector->base.eld;
-
-               for (i = 0; i < eld[2] * 4; i++)
-                       nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]);
-               for (i = eld[2] * 4; i < 0x60; i++)
-                       nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00);
-
-               nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002);
-       }
-}
-
-static void
-nvd0_audio_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       int or = nv_encoder->or * 0x30;
-
-       nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000);
-}
-
-/******************************************************************************
- * HDMI
- *****************************************************************************/
-static void
-nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-       struct nouveau_connector *nv_connector;
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       int head = nv_crtc->index * 0x800;
-       u32 rekey = 56; /* binary driver, and tegra constant */
-       u32 max_ac_packet;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       if (!drm_detect_hdmi_monitor(nv_connector->edid))
-               return;
-
-       max_ac_packet  = mode->htotal - mode->hdisplay;
-       max_ac_packet -= rekey;
-       max_ac_packet -= 18; /* constant from tegra */
-       max_ac_packet /= 32;
-
-       /* AVI InfoFrame */
-       nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
-       nv_wr32(device, 0x61671c + head, 0x000d0282);
-       nv_wr32(device, 0x616720 + head, 0x0000006f);
-       nv_wr32(device, 0x616724 + head, 0x00000000);
-       nv_wr32(device, 0x616728 + head, 0x00000000);
-       nv_wr32(device, 0x61672c + head, 0x00000000);
-       nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001);
-
-       /* ??? InfoFrame? */
-       nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
-       nv_wr32(device, 0x6167ac + head, 0x00000010);
-       nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001);
-
-       /* HDMI_CTRL */
-       nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
-                                                 max_ac_packet << 16);
-
-       /* NFI, audio doesn't work without it though.. */
-       nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000);
-
-       nvd0_audio_mode_set(encoder, mode);
-}
-
-static void
-nvd0_hdmi_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       int head = nv_crtc->index * 0x800;
-
-       nvd0_audio_disconnect(encoder);
-
-       nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000);
-       nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
-       nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
-}
-
-/******************************************************************************
- * SOR
- *****************************************************************************/
-static inline u32
-nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
-       static const u8 nvd0[] = { 16, 8, 0, 24 };
-       return nvd0[lane];
-}
-
-static void
-nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       const u32 loff = (or * 0x800) + (link * 0x80);
-       nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
-}
-
-static void
-nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
-                     u8 lane, u8 swing, u8 preem)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       const u32 loff = (or * 0x800) + (link * 0x80);
-       u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
-       u32 mask = 0x000000ff << shift;
-       u8 *table, *entry, *config = NULL;
-
-       switch (swing) {
-       case 0: preem += 0; break;
-       case 1: preem += 4; break;
-       case 2: preem += 7; break;
-       case 3: preem += 9; break;
-       }
-
-       table = nouveau_dp_bios_data(dev, dcb, &entry);
-       if (table) {
-               if (table[0] == 0x30) {
-                       config  = entry + table[4];
-                       config += table[5] * preem;
-               } else
-               if (table[0] == 0x40) {
-                       config  = table + table[1];
-                       config += table[2] * table[3];
-                       config += table[6] * preem;
-               }
-       }
-
-       if (!config) {
-               NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
-               return;
-       }
-
-       nv_mask(device, 0x61c118 + loff, mask, config[1] << shift);
-       nv_mask(device, 0x61c120 + loff, mask, config[2] << shift);
-       nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
-       nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000);
-}
-
-static void
-nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
-                    int link_nr, u32 link_bw, bool enhframe)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       const u32 loff = (or * 0x800) + (link * 0x80);
-       const u32 soff = (or * 0x800);
-       u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000;
-       u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000;
-       u32 script = 0x0000, lane_mask = 0;
-       u8 *table, *entry;
-       int i;
-
-       link_bw /= 27000;
-
-       table = nouveau_dp_bios_data(dev, dcb, &entry);
-       if (table) {
-               if      (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
-               else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]);
-               else                       entry = NULL;
-
-               while (entry) {
-                       if (entry[0] >= link_bw)
-                               break;
-                       entry += 3;
-               }
-
-               nouveau_bios_run_init_table(dev, script, dcb, crtc);
-       }
-
-       clksor |= link_bw << 18;
-       dpctrl |= ((1 << link_nr) - 1) << 16;
-       if (enhframe)
-               dpctrl |= 0x00004000;
-
-       for (i = 0; i < link_nr; i++)
-               lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
-
-       nv_wr32(device, 0x612300 + soff, clksor);
-       nv_wr32(device, 0x61c10c + loff, dpctrl);
-       nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask);
-}
-
-static void
-nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb,
-                    u32 *link_nr, u32 *link_bw)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       const u32 loff = (or * 0x800) + (link * 0x80);
-       const u32 soff = (or * 0x800);
-       u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000;
-       u32 clksor = nv_rd32(device, 0x612300 + soff);
-
-       if      (dpctrl > 0x00030000) *link_nr = 4;
-       else if (dpctrl > 0x00010000) *link_nr = 2;
-       else                          *link_nr = 1;
-
-       *link_bw  = (clksor & 0x007c0000) >> 18;
-       *link_bw *= 27000;
-}
-
-static void
-nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb,
-                   u32 crtc, u32 datarate)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       const u32 symbol = 100000;
-       const u32 TU = 64;
-       u32 link_nr, link_bw;
-       u64 ratio, value;
-
-       nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);
-
-       ratio  = datarate;
-       ratio *= symbol;
-       do_div(ratio, link_nr * link_bw);
-
-       value  = (symbol - ratio) * TU;
-       value *= ratio;
-       do_div(value, symbol);
-       do_div(value, symbol);
-
-       value += 5;
-       value |= 0x08000000;
-
-       nv_wr32(device, 0x616610 + (crtc * 0x800), value);
-}
-
-static void
-nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct drm_encoder *partner;
-       int or = nv_encoder->or;
-       u32 dpms_ctrl;
-
-       nv_encoder->last_dpms = mode;
-
-       list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
-               struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
-
-               if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
-                       continue;
-
-               if (nv_partner != nv_encoder &&
-                   nv_partner->dcb->or == nv_encoder->dcb->or) {
-                       if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
-                               return;
-                       break;
-               }
-       }
-
-       dpms_ctrl  = (mode == DRM_MODE_DPMS_ON);
-       dpms_ctrl |= 0x80000000;
-
-       nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
-       nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
-       nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
-       nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
-
-       if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-               struct dp_train_func func = {
-                       .link_set = nvd0_sor_dp_link_set,
-                       .train_set = nvd0_sor_dp_train_set,
-                       .train_adj = nvd0_sor_dp_train_adj
-               };
-
-               nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
-       }
-}
-
-static bool
-nvd0_sor_mode_fixup(struct drm_encoder *encoder,
-                   const struct drm_display_mode *mode,
-                   struct drm_display_mode *adjusted_mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_connector *nv_connector;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       if (nv_connector && nv_connector->native_mode) {
-               if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
-                       int id = adjusted_mode->base.id;
-                       *adjusted_mode = *nv_connector->native_mode;
-                       adjusted_mode->base.id = id;
-               }
-       }
-
-       return true;
-}
-
-static void
-nvd0_sor_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       u32 *push;
-
-       if (nv_encoder->crtc) {
-               nvd0_crtc_prepare(nv_encoder->crtc);
-
-               push = evo_wait(dev, EVO_MASTER, 4);
-               if (push) {
-                       evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
-                       evo_data(push, 0x00000000);
-                       evo_mthd(push, 0x0080, 1);
-                       evo_data(push, 0x00000000);
-                       evo_kick(push, dev, EVO_MASTER);
-               }
-
-               nvd0_hdmi_disconnect(encoder);
-
-               nv_encoder->crtc = NULL;
-               nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-       }
-}
-
-static void
-nvd0_sor_prepare(struct drm_encoder *encoder)
-{
-       nvd0_sor_disconnect(encoder);
-       if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
-               evo_sync(encoder->dev, EVO_MASTER);
-}
-
-static void
-nvd0_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
-                 struct drm_display_mode *mode)
-{
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-       struct nouveau_connector *nv_connector;
-       struct nvbios *bios = &drm->vbios;
-       u32 mode_ctrl = (1 << nv_crtc->index);
-       u32 syncs, magic, *push;
-       u32 or_config;
-
-       syncs = 0x00000001;
-       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-               syncs |= 0x00000008;
-       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-               syncs |= 0x00000010;
-
-       magic = 0x31ec6000 | (nv_crtc->index << 25);
-       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-               magic |= 0x00000001;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       switch (nv_encoder->dcb->type) {
-       case DCB_OUTPUT_TMDS:
-               if (nv_encoder->dcb->sorconf.link & 1) {
-                       if (mode->clock < 165000)
-                               mode_ctrl |= 0x00000100;
-                       else
-                               mode_ctrl |= 0x00000500;
-               } else {
-                       mode_ctrl |= 0x00000200;
-               }
-
-               or_config = (mode_ctrl & 0x00000f00) >> 8;
-               if (mode->clock >= 165000)
-                       or_config |= 0x0100;
-
-               nvd0_hdmi_mode_set(encoder, mode);
-               break;
-       case DCB_OUTPUT_LVDS:
-               or_config = (mode_ctrl & 0x00000f00) >> 8;
-               if (bios->fp_no_ddc) {
-                       if (bios->fp.dual_link)
-                               or_config |= 0x0100;
-                       if (bios->fp.if_is_24bit)
-                               or_config |= 0x0200;
-               } else {
-                       if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
-                               if (((u8 *)nv_connector->edid)[121] == 2)
-                                       or_config |= 0x0100;
-                       } else
-                       if (mode->clock >= bios->fp.duallink_transition_clk) {
-                               or_config |= 0x0100;
-                       }
-
-                       if (or_config & 0x0100) {
-                               if (bios->fp.strapless_is_24bit & 2)
-                                       or_config |= 0x0200;
-                       } else {
-                               if (bios->fp.strapless_is_24bit & 1)
-                                       or_config |= 0x0200;
-                       }
-
-                       if (nv_connector->base.display_info.bpc == 8)
-                               or_config |= 0x0200;
-
-               }
-               break;
-       case DCB_OUTPUT_DP:
-               if (nv_connector->base.display_info.bpc == 6) {
-                       nv_encoder->dp.datarate = mode->clock * 18 / 8;
-                       syncs |= 0x00000002 << 6;
-               } else {
-                       nv_encoder->dp.datarate = mode->clock * 24 / 8;
-                       syncs |= 0x00000005 << 6;
-               }
-
-               if (nv_encoder->dcb->sorconf.link & 1)
-                       mode_ctrl |= 0x00000800;
-               else
-                       mode_ctrl |= 0x00000900;
-
-               or_config = (mode_ctrl & 0x00000f00) >> 8;
-               break;
-       default:
-               BUG_ON(1);
-               break;
-       }
-
-       nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
-       if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-               nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
-                                        nv_encoder->dp.datarate);
-       }
-
-       push = evo_wait(dev, EVO_MASTER, 8);
-       if (push) {
-               evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
-               evo_data(push, syncs);
-               evo_data(push, magic);
-               evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2);
-               evo_data(push, mode_ctrl);
-               evo_data(push, or_config);
-               evo_kick(push, dev, EVO_MASTER);
-       }
-
-       nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_sor_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-       kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
-       .dpms = nvd0_sor_dpms,
-       .mode_fixup = nvd0_sor_mode_fixup,
-       .prepare = nvd0_sor_prepare,
-       .commit = nvd0_sor_commit,
-       .mode_set = nvd0_sor_mode_set,
-       .disable = nvd0_sor_disconnect,
-       .get_crtc = nvd0_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nvd0_sor_func = {
-       .destroy = nvd0_sor_destroy,
-};
-
-static int
-nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
-       struct drm_device *dev = connector->dev;
-       struct nouveau_encoder *nv_encoder;
-       struct drm_encoder *encoder;
-
-       nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
-       if (!nv_encoder)
-               return -ENOMEM;
-       nv_encoder->dcb = dcbe;
-       nv_encoder->or = ffs(dcbe->or) - 1;
-       nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
-       encoder = to_drm_encoder(nv_encoder);
-       encoder->possible_crtcs = dcbe->heads;
-       encoder->possible_clones = 0;
-       drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
-       drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
-
-       drm_mode_connector_attach_encoder(connector, encoder);
-       return 0;
-}
-
-/******************************************************************************
- * IRQ
- *****************************************************************************/
-static struct dcb_output *
-lookup_dcb(struct drm_device *dev, int id, u32 mc)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       int type, or, i, link = -1;
-
-       if (id < 4) {
-               type = DCB_OUTPUT_ANALOG;
-               or   = id;
-       } else {
-               switch (mc & 0x00000f00) {
-               case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break;
-               case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break;
-               case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break;
-               case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break;
-               case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break;
-               case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break;
-               default:
-                       NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc);
-                       return NULL;
-               }
-
-               or = id - 4;
-       }
-
-       for (i = 0; i < drm->vbios.dcb.entries; i++) {
-               struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
-               if (dcb->type == type && (dcb->or & (1 << or)) &&
-                   (link < 0 || link == !(dcb->sorconf.link & 1)))
-                       return dcb;
-       }
-
-       NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
-       return NULL;
-}
-
-static void
-nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct dcb_output *dcb;
-       int i;
-
-       for (i = 0; mask && i < 8; i++) {
-               u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
-               if (!(mcc & (1 << crtc)))
-                       continue;
-
-               dcb = lookup_dcb(dev, i, mcc);
-               if (!dcb)
-                       continue;
-
-               nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
-       }
-
-       nv_wr32(device, 0x6101d4, 0x00000000);
-       nv_wr32(device, 0x6109d4, 0x00000000);
-       nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct dcb_output *dcb;
-       u32 or, tmp, pclk;
-       int i;
-
-       for (i = 0; mask && i < 8; i++) {
-               u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
-               if (!(mcc & (1 << crtc)))
-                       continue;
-
-               dcb = lookup_dcb(dev, i, mcc);
-               if (!dcb)
-                       continue;
-
-               nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
-       }
-
-       pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
-       NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n",
-                         crtc, pclk, mask);
-       if (pclk && (mask & 0x00010000)) {
-               nv50_crtc_set_clock(dev, crtc, pclk);
-       }
-
-       for (i = 0; mask && i < 8; i++) {
-               u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
-               u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
-               if (!(mcp & (1 << crtc)))
-                       continue;
-
-               dcb = lookup_dcb(dev, i, mcp);
-               if (!dcb)
-                       continue;
-               or = ffs(dcb->or) - 1;
-
-               nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
-
-               nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000);
-               switch (dcb->type) {
-               case DCB_OUTPUT_ANALOG:
-                       nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000);
-                       break;
-               case DCB_OUTPUT_TMDS:
-               case DCB_OUTPUT_LVDS:
-               case DCB_OUTPUT_DP:
-                       if (cfg & 0x00000100)
-                               tmp = 0x00000101;
-                       else
-                               tmp = 0x00000000;
-
-                       nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp);
-                       break;
-               default:
-                       break;
-               }
-
-               break;
-       }
-
-       nv_wr32(device, 0x6101d4, 0x00000000);
-       nv_wr32(device, 0x6109d4, 0x00000000);
-       nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct dcb_output *dcb;
-       int pclk, i;
-
-       pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
-
-       for (i = 0; mask && i < 8; i++) {
-               u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
-               u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
-               if (!(mcp & (1 << crtc)))
-                       continue;
-
-               dcb = lookup_dcb(dev, i, mcp);
-               if (!dcb)
-                       continue;
-
-               nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
-       }
-
-       nv_wr32(device, 0x6101d4, 0x00000000);
-       nv_wr32(device, 0x6109d4, 0x00000000);
-       nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_bh(unsigned long data)
-{
-       struct drm_device *dev = (struct drm_device *)data;
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvd0_display *disp = nvd0_display(dev);
-       u32 mask = 0, crtc = ~0;
-       int i;
-
-       if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
-               NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset);
-               NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n",
-                        nv_rd32(device, 0x6101d0),
-                        nv_rd32(device, 0x6101d4), nv_rd32(device, 0x6109d4));
-               for (i = 0; i < 8; i++) {
-                       NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n",
-                               i < 4 ? "DAC" : "SOR", i,
-                               nv_rd32(device, 0x640180 + (i * 0x20)),
-                               nv_rd32(device, 0x660180 + (i * 0x20)));
-               }
-       }
-
-       while (!mask && ++crtc < dev->mode_config.num_crtc)
-               mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800));
-
-       if (disp->modeset & 0x00000001)
-               nvd0_display_unk1_handler(dev, crtc, mask);
-       if (disp->modeset & 0x00000002)
-               nvd0_display_unk2_handler(dev, crtc, mask);
-       if (disp->modeset & 0x00000004)
-               nvd0_display_unk4_handler(dev, crtc, mask);
-}
-
-void
-nvd0_display_intr(struct drm_device *dev)
-{
-       struct nvd0_display *disp = nvd0_display(dev);
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       u32 intr = nv_rd32(device, 0x610088);
-
-       if (intr & 0x00000001) {
-               u32 stat = nv_rd32(device, 0x61008c);
-               nv_wr32(device, 0x61008c, stat);
-               intr &= ~0x00000001;
-       }
-
-       if (intr & 0x00000002) {
-               u32 stat = nv_rd32(device, 0x61009c);
-               int chid = ffs(stat) - 1;
-               if (chid >= 0) {
-                       u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12));
-                       u32 data = nv_rd32(device, 0x6101f4 + (chid * 12));
-                       u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12));
-
-                       NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
-                                    "0x%08x 0x%08x\n",
-                               chid, (mthd & 0x0000ffc), data, mthd, unkn);
-                       nv_wr32(device, 0x61009c, (1 << chid));
-                       nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
-               }
-
-               intr &= ~0x00000002;
-       }
-
-       if (intr & 0x00100000) {
-               u32 stat = nv_rd32(device, 0x6100ac);
-
-               if (stat & 0x00000007) {
-                       disp->modeset = stat;
-                       tasklet_schedule(&disp->tasklet);
-
-                       nv_wr32(device, 0x6100ac, (stat & 0x00000007));
-                       stat &= ~0x00000007;
-               }
-
-               if (stat) {
-                       NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat);
-                       nv_wr32(device, 0x6100ac, stat);
-               }
-
-               intr &= ~0x00100000;
-       }
-
-       intr &= ~0x0f000000; /* vblank, handled in core */
-       if (intr)
-               NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr);
-}
-
-/******************************************************************************
- * Init
- *****************************************************************************/
-void
-nvd0_display_fini(struct drm_device *dev)
-{
-       int i;
-
-       /* fini cursors + overlays + flips */
-       for (i = 1; i >= 0; i--) {
-               evo_fini_pio(dev, EVO_CURS(i));
-               evo_fini_pio(dev, EVO_OIMM(i));
-               evo_fini_dma(dev, EVO_OVLY(i));
-               evo_fini_dma(dev, EVO_FLIP(i));
-       }
-
-       /* fini master */
-       evo_fini_dma(dev, EVO_MASTER);
-}
-
-int
-nvd0_display_init(struct drm_device *dev)
-{
-       struct nvd0_display *disp = nvd0_display(dev);
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       int ret, i;
-       u32 *push;
-
-       if (nv_rd32(device, 0x6100ac) & 0x00000100) {
-               nv_wr32(device, 0x6100ac, 0x00000100);
-               nv_mask(device, 0x6194e8, 0x00000001, 0x00000000);
-               if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) {
-                       NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n",
-                                nv_rd32(device, 0x6194e8));
-                       return -EBUSY;
-               }
-       }
-
-       /* nfi what these are exactly, i do know that SOR_MODE_CTRL won't
-        * work at all unless you do the SOR part below.
-        */
-       for (i = 0; i < 3; i++) {
-               u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800));
-               nv_wr32(device, 0x6101c0 + (i * 0x800), dac);
-       }
-
-       for (i = 0; i < 4; i++) {
-               u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800));
-               nv_wr32(device, 0x6301c4 + (i * 0x800), sor);
-       }
-
-       for (i = 0; i < dev->mode_config.num_crtc; i++) {
-               u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800));
-               u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800));
-               u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800));
-               nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0);
-               nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1);
-               nv_wr32(device, 0x6101bc + (i * 0x800), crtc2);
-       }
-
-       /* point at our hash table / objects, enable interrupts */
-       nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9);
-       nv_mask(device, 0x6100b0, 0x00000307, 0x00000307);
-
-       /* init master */
-       ret = evo_init_dma(dev, EVO_MASTER);
-       if (ret)
-               goto error;
-
-       /* init flips + overlays + cursors */
-       for (i = 0; i < dev->mode_config.num_crtc; i++) {
-               if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
-                   (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
-                   (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
-                   (ret = evo_init_pio(dev, EVO_CURS(i))))
-                       goto error;
-       }
-
-       push = evo_wait(dev, EVO_MASTER, 32);
-       if (!push) {
-               ret = -EBUSY;
-               goto error;
-       }
-       evo_mthd(push, 0x0088, 1);
-       evo_data(push, NvEvoSync);
-       evo_mthd(push, 0x0084, 1);
-       evo_data(push, 0x00000000);
-       evo_mthd(push, 0x0084, 1);
-       evo_data(push, 0x80000000);
-       evo_mthd(push, 0x008c, 1);
-       evo_data(push, 0x00000000);
-       evo_kick(push, dev, EVO_MASTER);
-
-error:
-       if (ret)
-               nvd0_display_fini(dev);
-       return ret;
-}
-
-void
-nvd0_display_destroy(struct drm_device *dev)
-{
-       struct nvd0_display *disp = nvd0_display(dev);
-       struct pci_dev *pdev = dev->pdev;
-       int i;
-
-       for (i = 0; i < EVO_DMA_NR; i++) {
-               struct evo *evo = &disp->evo[i];
-               pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
-       }
-
-       nouveau_gpuobj_ref(NULL, &disp->mem);
-       nouveau_bo_unmap(disp->sync);
-       nouveau_bo_ref(NULL, &disp->sync);
-
-       nouveau_display(dev)->priv = NULL;
-       kfree(disp);
-}
-
-int
-nvd0_display_create(struct drm_device *dev)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_bar *bar = nouveau_bar(device);
-       struct nouveau_fb *pfb = nouveau_fb(device);
-       struct dcb_table *dcb = &drm->vbios.dcb;
-       struct drm_connector *connector, *tmp;
-       struct pci_dev *pdev = dev->pdev;
-       struct nvd0_display *disp;
-       struct dcb_output *dcbe;
-       int crtcs, ret, i;
-
-       disp = kzalloc(sizeof(*disp), GFP_KERNEL);
-       if (!disp)
-               return -ENOMEM;
-
-       nouveau_display(dev)->priv = disp;
-       nouveau_display(dev)->dtor = nvd0_display_destroy;
-       nouveau_display(dev)->init = nvd0_display_init;
-       nouveau_display(dev)->fini = nvd0_display_fini;
-
-       /* create crtc objects to represent the hw heads */
-       crtcs = nv_rd32(device, 0x022448);
-       for (i = 0; i < crtcs; i++) {
-               ret = nvd0_crtc_create(dev, i);
-               if (ret)
-                       goto out;
-       }
-
-       /* create encoder/connector objects based on VBIOS DCB table */
-       for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
-               connector = nouveau_connector_create(dev, dcbe->connector);
-               if (IS_ERR(connector))
-                       continue;
-
-               if (dcbe->location != DCB_LOC_ON_CHIP) {
-                       NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
-                               dcbe->type, ffs(dcbe->or) - 1);
-                       continue;
-               }
-
-               switch (dcbe->type) {
-               case DCB_OUTPUT_TMDS:
-               case DCB_OUTPUT_LVDS:
-               case DCB_OUTPUT_DP:
-                       nvd0_sor_create(connector, dcbe);
-                       break;
-               case DCB_OUTPUT_ANALOG:
-                       nvd0_dac_create(connector, dcbe);
-                       break;
-               default:
-                       NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
-                               dcbe->type, ffs(dcbe->or) - 1);
-                       continue;
-               }
-       }
-
-       /* cull any connectors we created that don't have an encoder */
-       list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
-               if (connector->encoder_ids[0])
-                       continue;
-
-               NV_WARN(drm, "%s has no encoders, removing\n",
-                       drm_get_connector_name(connector));
-               connector->funcs->destroy(connector);
-       }
-
-       /* setup interrupt handling */
-       tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
-
-       /* small shared memory area we use for notifiers and semaphores */
-       ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-                            0, 0x0000, NULL, &disp->sync);
-       if (!ret) {
-               ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
-               if (!ret)
-                       ret = nouveau_bo_map(disp->sync);
-               if (ret)
-                       nouveau_bo_ref(NULL, &disp->sync);
-       }
-
-       if (ret)
-               goto out;
-
-       /* hash table and dma objects for the memory areas we care about */
-       ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000,
-                                NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
-       if (ret)
-               goto out;
-
-       /* create evo dma channels */
-       for (i = 0; i < EVO_DMA_NR; i++) {
-               struct evo *evo = &disp->evo[i];
-               u64 offset = disp->sync->bo.offset;
-               u32 dmao = 0x1000 + (i * 0x100);
-               u32 hash = 0x0000 + (i * 0x040);
-
-               evo->idx = i;
-               evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
-               evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
-               if (!evo->ptr) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
-               nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
-               nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
-               nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
-               nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
-               nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
-                                               ((dmao + 0x00) << 9));
-
-               nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
-               nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8);
-               nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
-               nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
-               nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
-                                               ((dmao + 0x20) << 9));
-
-               nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
-               nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8);
-               nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
-               nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
-               nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
-                                               ((dmao + 0x40) << 9));
-
-               nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
-               nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8);
-               nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
-               nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
-               nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
-                                               ((dmao + 0x60) << 9));
-       }
-
-       bar->flush(bar);
-
-out:
-       if (ret)
-               nvd0_display_destroy(dev);
-       return ret;
-}
index 2e566e1..9175615 100644 (file)
@@ -561,6 +561,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                /* use frac fb div on APUs */
                if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+               if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+                       radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
        } else {
                radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
 
@@ -1697,34 +1699,22 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                DRM_ERROR("unable to allocate a PPLL\n");
                return ATOM_PPLL_INVALID;
        } else {
-               if (ASIC_IS_AVIVO(rdev)) {
-                       /* in DP mode, the DP ref clock can come from either PPLL
-                        * depending on the asic:
-                        * DCE3: PPLL1 or PPLL2
-                        */
-                       if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
-                               /* use the same PPLL for all DP monitors */
-                               pll = radeon_get_shared_dp_ppll(crtc);
-                               if (pll != ATOM_PPLL_INVALID)
-                                       return pll;
-                       } else {
-                               /* use the same PPLL for all monitors with the same clock */
-                               pll = radeon_get_shared_nondp_ppll(crtc);
-                               if (pll != ATOM_PPLL_INVALID)
-                                       return pll;
-                       }
-                       /* all other cases */
-                       pll_in_use = radeon_get_pll_use_mask(crtc);
-                       if (!(pll_in_use & (1 << ATOM_PPLL1)))
-                               return ATOM_PPLL1;
-                       if (!(pll_in_use & (1 << ATOM_PPLL2)))
-                               return ATOM_PPLL2;
-                       DRM_ERROR("unable to allocate a PPLL\n");
-                       return ATOM_PPLL_INVALID;
-               } else {
-                       /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
-                       return radeon_crtc->crtc_id;
-               }
+               /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
+               /* some atombios (observed in some DCE2/DCE3) code have a bug,
+                * the matching btw pll and crtc is done through
+                * PCLK_CRTC[1|2]_CNTL (0x480/0x484) but atombios code use the
+                * pll (1 or 2) to select which register to write. ie if using
+                * pll1 it will use PCLK_CRTC1_CNTL (0x480) and if using pll2
+                * it will use PCLK_CRTC2_CNTL (0x484), it then use crtc id to
+                * choose which value to write. Which is reverse order from
+                * register logic. So only case that works is when pllid is
+                * same as crtcid or when both pll and crtc are enabled and
+                * both use same clock.
+                *
+                * So just return crtc id as if crtc and pll were hard linked
+                * together even if they aren't
+                */
+               return radeon_crtc->crtc_id;
        }
 }
 
index ba498f8..4552d4a 100644 (file)
@@ -340,7 +340,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
            ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
             (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-               radeon_dp_set_link_config(connector, mode);
+               radeon_dp_set_link_config(connector, adjusted_mode);
        }
 
        return true;
@@ -1625,7 +1625,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
                        /* some early dce3.2 boards have a bug in their transmitter control table */
-                       if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
+                       if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
                                atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
                }
                if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
index 14313ad..8dbc69a 100644 (file)
@@ -1330,6 +1330,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                                        break;
                                udelay(1);
                        }
+               } else {
+                       save->crtc_enabled[i] = false;
                }
        }
 
@@ -1372,7 +1374,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
        WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
 
        for (i = 0; i < rdev->num_crtc; i++) {
-               if (save->crtc_enabled) {
+               if (save->crtc_enabled[i]) {
                        if (ASIC_IS_DCE6(rdev)) {
                                tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
                                tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
@@ -1819,7 +1821,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        case CHIP_SUMO:
                rdev->config.evergreen.num_ses = 1;
                rdev->config.evergreen.max_pipes = 4;
-               rdev->config.evergreen.max_tile_pipes = 2;
+               rdev->config.evergreen.max_tile_pipes = 4;
                if (rdev->pdev->device == 0x9648)
                        rdev->config.evergreen.max_simds = 3;
                else if ((rdev->pdev->device == 0x9647) ||
@@ -1842,7 +1844,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+               gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_SUMO2:
                rdev->config.evergreen.num_ses = 1;
@@ -1864,7 +1866,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+               gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_BARTS:
                rdev->config.evergreen.num_ses = 2;
@@ -1912,7 +1914,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                break;
        case CHIP_CAICOS:
                rdev->config.evergreen.num_ses = 1;
-               rdev->config.evergreen.max_pipes = 4;
+               rdev->config.evergreen.max_pipes = 2;
                rdev->config.evergreen.max_tile_pipes = 2;
                rdev->config.evergreen.max_simds = 2;
                rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
@@ -2032,6 +2034,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG, gb_addr_config);
 
        tmp = gb_addr_config & NUM_PIPES_MASK;
        tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
@@ -2401,8 +2404,12 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
                                         CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
                cayman_cp_int_cntl_setup(rdev, 1, 0);
                cayman_cp_int_cntl_setup(rdev, 2, 0);
+               tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+               WREG32(CAYMAN_DMA1_CNTL, tmp);
        } else
                WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+       tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+       WREG32(DMA_CNTL, tmp);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2455,6 +2462,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
        u32 grbm_int_cntl = 0;
        u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
        u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+       u32 dma_cntl, dma_cntl1 = 0;
 
        if (!rdev->irq.installed) {
                WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2482,6 +2490,8 @@ int evergreen_irq_set(struct radeon_device *rdev)
        afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
        afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 
+       dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
        if (rdev->family >= CHIP_CAYMAN) {
                /* enable CP interrupts on all rings */
                if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -2504,6 +2514,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
                }
        }
 
+       if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+               DRM_DEBUG("r600_irq_set: sw int dma\n");
+               dma_cntl |= TRAP_ENABLE;
+       }
+
+       if (rdev->family >= CHIP_CAYMAN) {
+               dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+               if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+                       DRM_DEBUG("r600_irq_set: sw int dma1\n");
+                       dma_cntl1 |= TRAP_ENABLE;
+               }
+       }
+
        if (rdev->irq.crtc_vblank_int[0] ||
            atomic_read(&rdev->irq.pflip[0])) {
                DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2589,6 +2612,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
                cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
        } else
                WREG32(CP_INT_CNTL, cp_int_cntl);
+
+       WREG32(DMA_CNTL, dma_cntl);
+
+       if (rdev->family >= CHIP_CAYMAN)
+               WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
+
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3091,6 +3120,16 @@ restart_ih:
                                break;
                        }
                        break;
+               case 146:
+               case 147:
+                       dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+                       dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+                               RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+                       dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+                               RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+                       /* reset addr and status */
+                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+                       break;
                case 176: /* CP_INT in ring buffer */
                case 177: /* CP_INT in IB1 */
                case 178: /* CP_INT in IB2 */
@@ -3114,9 +3153,19 @@ restart_ih:
                        } else
                                radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
+               case 224: /* DMA trap event */
+                       DRM_DEBUG("IH: DMA trap\n");
+                       radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+                       break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: GUI idle\n");
                        break;
+               case 244: /* DMA trap event */
+                       if (rdev->family >= CHIP_CAYMAN) {
+                               DRM_DEBUG("IH: DMA1 trap\n");
+                               radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+                       }
+                       break;
                default:
                        DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                        break;
@@ -3142,6 +3191,143 @@ restart_ih:
        return IRQ_HANDLED;
 }
 
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+                                  struct radeon_fence *fence)
+{
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+       u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+       /* write the fence */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+       /* low 2 bits of the address are masked off: the fence slot is dword aligned */
+       radeon_ring_write(ring, addr & 0xfffffffc);
+       /* only 40-bit GPU addresses: upper dword is truncated to 8 bits */
+       radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+       radeon_ring_write(ring, fence->seq);
+       /* generate an interrupt */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+       /* flush HDP */
+       /* NOTE(review): SRBM write of 1 to HDP_MEM_COHERENCY_FLUSH_CNTL —
+        * presumably flushes the host data path so the CPU sees the fence
+        * value; mirrors the CP fence emit convention. TODO confirm against
+        * the register spec.
+        */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+       radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL);
+       radeon_ring_write(ring, 1);
+}
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+                                  struct radeon_ib *ib)
+{
+       struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+       if (rdev->wb.enabled) {
+               /* Emit a WRITE packet recording the predicted rptr into the
+                * writeback slot.  The 4 DW of the WRITE packet itself are
+                * accounted for, then the value is advanced to where wptr
+                * will be after the padded INDIRECT_BUFFER packet below
+                * ((wptr & 7) == 5 before the 3-DW IB packet).
+                */
+               u32 next_rptr = ring->wptr + 4;
+               while ((next_rptr & 7) != 5)
+                       next_rptr++;
+               next_rptr += 3;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+               radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+               radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+               radeon_ring_write(ring, next_rptr);
+       }
+
+       /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+        * Pad as necessary with NOPs.
+        */
+       while ((ring->wptr & 7) != 5)
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+       /* IB base must be 32-byte aligned; low 5 bits are dropped */
+       radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+       radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+                      uint64_t src_offset, uint64_t dst_offset,
+                      unsigned num_gpu_pages,
+                      struct radeon_fence **fence)
+{
+       struct radeon_semaphore *sem = NULL;
+       int ring_index = rdev->asic->copy.dma_ring_index;
+       struct radeon_ring *ring = &rdev->ring[ring_index];
+       u32 size_in_dw, cur_size_in_dw;
+       int i, num_loops;
+       int r = 0;
+
+       r = radeon_semaphore_create(rdev, &sem);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               return r;
+       }
+
+       /* each DMA COPY packet moves at most 0xfffff dwords */
+       size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+       num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+       /* 5 DW per COPY packet, plus headroom for the semaphore sync and
+        * fence packets emitted below
+        */
+       r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               radeon_semaphore_free(rdev, &sem, NULL);
+               return r;
+       }
+
+       /* wait for the previous fence's ring if it differs from ours;
+        * otherwise the semaphore is unused and released immediately
+        */
+       if (radeon_fence_need_sync(*fence, ring->idx)) {
+               radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+                                           ring->idx);
+               radeon_fence_note_sync(*fence, ring->idx);
+       } else {
+               radeon_semaphore_free(rdev, &sem, NULL);
+       }
+
+       for (i = 0; i < num_loops; i++) {
+               cur_size_in_dw = size_in_dw;
+               if (cur_size_in_dw > 0xFFFFF)
+                       cur_size_in_dw = 0xFFFFF;
+               size_in_dw -= cur_size_in_dw;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+               /* src/dst must be dword aligned; addresses are 40-bit */
+               radeon_ring_write(ring, dst_offset & 0xfffffffc);
+               radeon_ring_write(ring, src_offset & 0xfffffffc);
+               radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+               radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+               src_offset += cur_size_in_dw * 4;
+               dst_offset += cur_size_in_dw * 4;
+       }
+
+       r = radeon_fence_emit(rdev, fence, ring->idx);
+       if (r) {
+               radeon_ring_unlock_undo(rdev, ring);
+               return r;
+       }
+
+       radeon_ring_unlock_commit(rdev, ring);
+       radeon_semaphore_free(rdev, &sem, *fence);
+
+       return r;
+}
+
 static int evergreen_startup(struct radeon_device *rdev)
 {
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -3205,6 +3391,12 @@ static int evergreen_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -3219,12 +3411,23 @@ static int evergreen_startup(struct radeon_device *rdev)
                             0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
+
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+                            DMA_RB_RPTR, DMA_RB_WPTR,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       if (r)
+               return r;
+
        r = evergreen_cp_load_microcode(rdev);
        if (r)
                return r;
        r = evergreen_cp_resume(rdev);
        if (r)
                return r;
+       r = r600_dma_resume(rdev);
+       if (r)
+               return r;
 
        r = radeon_ib_pool_init(rdev);
        if (r) {
@@ -3271,11 +3474,9 @@ int evergreen_resume(struct radeon_device *rdev)
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
-       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-
        r600_audio_fini(rdev);
        r700_cp_stop(rdev);
-       ring->ready = false;
+       r600_dma_stop(rdev);
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        evergreen_pcie_gart_disable(rdev);
@@ -3352,6 +3553,9 @@ int evergreen_init(struct radeon_device *rdev)
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+       rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -3364,6 +3568,7 @@ int evergreen_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
+               r600_dma_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_ib_pool_fini(rdev);
@@ -3391,6 +3596,7 @@ void evergreen_fini(struct radeon_device *rdev)
        r600_audio_fini(rdev);
        r600_blit_fini(rdev);
        r700_cp_fini(rdev);
+       r600_dma_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
index 95e6318..74c6b42 100644 (file)
@@ -34,6 +34,8 @@
 #define MAX(a,b)                   (((a)>(b))?(a):(b))
 #define MIN(a,b)                   (((a)<(b))?(a):(b))
 
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+                          struct radeon_cs_reloc **cs_reloc);
 static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
                                          struct radeon_cs_reloc **cs_reloc);
 
@@ -507,20 +509,28 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
                /* height is npipes htiles aligned == npipes * 8 pixel aligned */
                nby = round_up(nby, track->npipes * 8);
        } else {
+               /* always assume 8x8 htile */
+               /* align is htile align * 8, htile align vary according to
+                * number of pipe and tile width and nby
+                */
                switch (track->npipes) {
                case 8:
+                       /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
                        nbx = round_up(nbx, 64 * 8);
                        nby = round_up(nby, 64 * 8);
                        break;
                case 4:
+                       /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
                        nbx = round_up(nbx, 64 * 8);
                        nby = round_up(nby, 32 * 8);
                        break;
                case 2:
+                       /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
                        nbx = round_up(nbx, 32 * 8);
                        nby = round_up(nby, 32 * 8);
                        break;
                case 1:
+                       /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
                        nbx = round_up(nbx, 32 * 8);
                        nby = round_up(nby, 16 * 8);
                        break;
@@ -531,9 +541,10 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
                }
        }
        /* compute number of htile */
-       nbx = nbx / 8;
-       nby = nby / 8;
-       size = nbx * nby * 4;
+       nbx = nbx >> 3;
+       nby = nby >> 3;
+       /* size must be aligned on npipes * 2K boundary */
+       size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
        size += track->htile_offset;
 
        if (size > radeon_bo_size(track->htile_bo)) {
@@ -1790,6 +1801,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case DB_HTILE_SURFACE:
                /* 8x8 only */
                track->htile_surface = radeon_get_ib_value(p, idx);
+               /* force 8x8 htile width and height */
+               ib[idx] |= 3;
                track->db_dirty = true;
                break;
        case CB_IMMED0_BASE:
@@ -2232,6 +2245,107 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        ib[idx+2] = upper_32_bits(offset) & 0xff;
                }
                break;
+       case PACKET3_CP_DMA:
+       {
+               u32 command, size, info;
+               u64 offset, tmp;
+               if (pkt->count != 4) {
+                       DRM_ERROR("bad CP DMA\n");
+                       return -EINVAL;
+               }
+               command = radeon_get_ib_value(p, idx+4);
+               size = command & 0x1fffff;
+               info = radeon_get_ib_value(p, idx+1);
+               if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+                   (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+                   ((((info & 0x00300000) >> 20) == 0) &&
+                    (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+                   ((((info & 0x60000000) >> 29) == 0) &&
+                    (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+                       /* non mem to mem copies requires dw aligned count */
+                       if (size % 4) {
+                               DRM_ERROR("CP DMA command requires dw count alignment\n");
+                               return -EINVAL;
+                       }
+               }
+               if (command & PACKET3_CP_DMA_CMD_SAS) {
+                       /* src address space is register */
+                       /* GDS is ok */
+                       if (((info & 0x60000000) >> 29) != 1) {
+                               DRM_ERROR("CP DMA SAS not supported\n");
+                               return -EINVAL;
+                       }
+               } else {
+                       if (command & PACKET3_CP_DMA_CMD_SAIC) {
+                               DRM_ERROR("CP DMA SAIC only supported for registers\n");
+                               return -EINVAL;
+                       }
+                       /* src address space is memory */
+                       if (((info & 0x60000000) >> 29) == 0) {
+                               r = evergreen_cs_packet_next_reloc(p, &reloc);
+                               if (r) {
+                                       DRM_ERROR("bad CP DMA SRC\n");
+                                       return -EINVAL;
+                               }
+
+                               tmp = radeon_get_ib_value(p, idx) +
+                                       ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+                               offset = reloc->lobj.gpu_offset + tmp;
+
+                               if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+                                       dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+                                                tmp + size, radeon_bo_size(reloc->robj));
+                                       return -EINVAL;
+                               }
+
+                               ib[idx] = offset;
+                               ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+                       } else if (((info & 0x60000000) >> 29) != 2) {
+                               DRM_ERROR("bad CP DMA SRC_SEL\n");
+                               return -EINVAL;
+                       }
+               }
+               if (command & PACKET3_CP_DMA_CMD_DAS) {
+                       /* dst address space is register */
+                       /* GDS is ok */
+                       if (((info & 0x00300000) >> 20) != 1) {
+                               DRM_ERROR("CP DMA DAS not supported\n");
+                               return -EINVAL;
+                       }
+               } else {
+                       /* dst address space is memory */
+                       if (command & PACKET3_CP_DMA_CMD_DAIC) {
+                               DRM_ERROR("CP DMA DAIC only supported for registers\n");
+                               return -EINVAL;
+                       }
+                       if (((info & 0x00300000) >> 20) == 0) {
+                               r = evergreen_cs_packet_next_reloc(p, &reloc);
+                               if (r) {
+                                       DRM_ERROR("bad CP DMA DST\n");
+                                       return -EINVAL;
+                               }
+
+                               tmp = radeon_get_ib_value(p, idx+2) +
+                                       ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+                               offset = reloc->lobj.gpu_offset + tmp;
+
+                               if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+                                       dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+                                                tmp + size, radeon_bo_size(reloc->robj));
+                                       return -EINVAL;
+                               }
+
+                               ib[idx+2] = offset;
+                               ib[idx+3] = upper_32_bits(offset) & 0xff;
+                       } else {
+                               DRM_ERROR("bad CP DMA DST_SEL\n");
+                               return -EINVAL;
+                       }
+               }
+               break;
+       }
        case PACKET3_SURFACE_SYNC:
                if (pkt->count != 3) {
                        DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2715,6 +2829,455 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
        return 0;
 }
 
+/*
+ *  DMA
+ */
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
+#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)
+
+/**
+ * evergreen_dma_cs_parse() - parse the DMA IB
+ * @p:         parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (Evergreen-Cayman)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
+{
+       struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+       struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+       u32 header, cmd, count, tiled, new_cmd, misc;
+       volatile u32 *ib = p->ib.ptr;
+       u32 idx, idx_value;
+       u64 src_offset, dst_offset, dst2_offset;
+       int r;
+
+       do {
+               if (p->idx >= ib_chunk->length_dw) {
+                       DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+                                 p->idx, ib_chunk->length_dw);
+                       return -EINVAL;
+               }
+               idx = p->idx;
+               header = radeon_get_ib_value(p, idx);
+               cmd = GET_DMA_CMD(header);
+               count = GET_DMA_COUNT(header);
+               tiled = GET_DMA_T(header);
+               new_cmd = GET_DMA_NEW(header);
+               misc = GET_DMA_MISC(header);
+
+               switch (cmd) {
+               case DMA_PACKET_WRITE:
+                       r = r600_dma_cs_next_reloc(p, &dst_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_WRITE\n");
+                               return -EINVAL;
+                       }
+                       if (tiled) {
+                               dst_offset = ib[idx+1];
+                               dst_offset <<= 8;
+
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               p->idx += count + 7;
+                       } else {
+                               dst_offset = ib[idx+1];
+                               dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += count + 3;
+                       }
+                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                               dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                               return -EINVAL;
+                       }
+                       break;
+               case DMA_PACKET_COPY:
+                       r = r600_dma_cs_next_reloc(p, &src_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_COPY\n");
+                               return -EINVAL;
+                       }
+                       r = r600_dma_cs_next_reloc(p, &dst_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_COPY\n");
+                               return -EINVAL;
+                       }
+                       if (tiled) {
+                               idx_value = radeon_get_ib_value(p, idx + 2);
+                               if (new_cmd) {
+                                       switch (misc) {
+                                       case 0:
+                                               /* L2T, frame to fields */
+                                               if (idx_value & (1 << 31)) {
+                                                       DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+                                               if (r) {
+                                                       DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               dst_offset = ib[idx+1];
+                                               dst_offset <<= 8;
+                                               dst2_offset = ib[idx+2];
+                                               dst2_offset <<= 8;
+                                               src_offset = ib[idx+8];
+                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+                                                                dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                               p->idx += 10;
+                                               break;
+                                       case 1:
+                                               /* L2T, T2L partial */
+                                               if (p->family < CHIP_CAYMAN) {
+                                                       DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+                                                       return -EINVAL;
+                                               }
+                                               /* detile bit */
+                                               if (idx_value & (1 << 31)) {
+                                                       /* tiled src, linear dst */
+                                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+                                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                               } else {
+                                                       /* linear src, tiled dst */
+                                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+                                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               }
+                                               p->idx += 12;
+                                               break;
+                                       case 3:
+                                               /* L2T, broadcast */
+                                               if (idx_value & (1 << 31)) {
+                                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+                                               if (r) {
+                                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               dst_offset = ib[idx+1];
+                                               dst_offset <<= 8;
+                                               dst2_offset = ib[idx+2];
+                                               dst2_offset <<= 8;
+                                               src_offset = ib[idx+8];
+                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+                                                                dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                               p->idx += 10;
+                                               break;
+                                       case 4:
+                                               /* L2T, T2L */
+                                               /* detile bit */
+                                               if (idx_value & (1 << 31)) {
+                                                       /* tiled src, linear dst */
+                                                       src_offset = ib[idx+1];
+                                                       src_offset <<= 8;
+                                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+                                                       dst_offset = ib[idx+7];
+                                                       dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                               } else {
+                                                       /* linear src, tiled dst */
+                                                       src_offset = ib[idx+7];
+                                                       src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+                                                       dst_offset = ib[idx+1];
+                                                       dst_offset <<= 8;
+                                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               }
+                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               p->idx += 9;
+                                               break;
+                                       case 5:
+                                               /* T2T partial */
+                                               if (p->family < CHIP_CAYMAN) {
+                                                       DRM_ERROR("T2T Partial is cayman only !\n");
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               p->idx += 13;
+                                               break;
+                                       case 7:
+                                               /* L2T, broadcast */
+                                               if (idx_value & (1 << 31)) {
+                                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+                                               if (r) {
+                                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               dst_offset = ib[idx+1];
+                                               dst_offset <<= 8;
+                                               dst2_offset = ib[idx+2];
+                                               dst2_offset <<= 8;
+                                               src_offset = ib[idx+8];
+                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+                                                                dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                               p->idx += 10;
+                                               break;
+                                       default:
+                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       switch (misc) {
+                                       case 0:
+                                               /* detile bit */
+                                               if (idx_value & (1 << 31)) {
+                                                       /* tiled src, linear dst */
+                                                       src_offset = ib[idx+1];
+                                                       src_offset <<= 8;
+                                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+                                                       dst_offset = ib[idx+7];
+                                                       dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                               } else {
+                                                       /* linear src, tiled dst */
+                                                       src_offset = ib[idx+7];
+                                                       src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+                                                       dst_offset = ib[idx+1];
+                                                       dst_offset <<= 8;
+                                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               }
+                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               p->idx += 9;
+                                               break;
+                                       default:
+                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+                                               return -EINVAL;
+                                       }
+                               }
+                       } else {
+                               if (new_cmd) {
+                                       switch (misc) {
+                                       case 0:
+                                               /* L2L, byte */
+                                               src_offset = ib[idx+2];
+                                               src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+                                               dst_offset = ib[idx+1];
+                                               dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                               if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+                                                                src_offset + count, radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + count, radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+                                               ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+                                               ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                               ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                               p->idx += 5;
+                                               break;
+                                       case 1:
+                                               /* L2L, partial */
+                                               if (p->family < CHIP_CAYMAN) {
+                                                       DRM_ERROR("L2L Partial is cayman only !\n");
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+                                               ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                               ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+                                               ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+                                               p->idx += 9;
+                                               break;
+                                       case 4:
+                                               /* L2L, dw, broadcast */
+                                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+                                               if (r) {
+                                                       DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               dst_offset = ib[idx+1];
+                                               dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+                                               dst2_offset = ib[idx+2];
+                                               dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
+                                               src_offset = ib[idx+3];
+                                               src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+                                                                dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+                                               ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                               ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                               ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+                                               ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                               p->idx += 7;
+                                               break;
+                                       default:
+                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       /* L2L, dw */
+                                       src_offset = ib[idx+2];
+                                       src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+                                       dst_offset = ib[idx+1];
+                                       dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                       if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                               dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+                                                        src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                               return -EINVAL;
+                                       }
+                                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                               dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+                                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                               return -EINVAL;
+                                       }
+                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                       ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                       p->idx += 5;
+                               }
+                       }
+                       break;
+               case DMA_PACKET_CONSTANT_FILL:
+                       r = r600_dma_cs_next_reloc(p, &dst_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+                               return -EINVAL;
+                       }
+                       dst_offset = ib[idx+1];
+                       dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                               dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                               return -EINVAL;
+                       }
+                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                       ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+                       p->idx += 4;
+                       break;
+               case DMA_PACKET_NOP:
+                       p->idx += 1;
+                       break;
+               default:
+                       DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+                       return -EINVAL;
+               }
+       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+       for (r = 0; r < p->ib.length_dw; r++) {
+               printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
+               mdelay(1);
+       }
+#endif
+       return 0;
+}
+
 /* vm parser */
 static bool evergreen_vm_reg_valid(u32 reg)
 {
@@ -2725,6 +3288,9 @@ static bool evergreen_vm_reg_valid(u32 reg)
        /* check config regs */
        switch (reg) {
        case GRBM_GFX_INDEX:
+       case CP_STRMOUT_CNTL:
+       case CP_COHER_CNTL:
+       case CP_COHER_SIZE:
        case VGT_VTX_VECT_EJECT_REG:
        case VGT_CACHE_INVALIDATION:
        case VGT_GS_VERTEX_REUSE:
@@ -2840,6 +3406,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
        u32 idx = pkt->idx + 1;
        u32 idx_value = ib[idx];
        u32 start_reg, end_reg, reg, i;
+       u32 command, info;
 
        switch (pkt->opcode) {
        case PACKET3_NOP:
@@ -2914,6 +3481,64 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
                                return -EINVAL;
                }
                break;
+       case PACKET3_CP_DMA:
+               command = ib[idx + 4];
+               info = ib[idx + 1];
+               if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+                   (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+                   ((((info & 0x00300000) >> 20) == 0) &&
+                    (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+                   ((((info & 0x60000000) >> 29) == 0) &&
+                    (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+                       /* non mem to mem copies require dw aligned count */
+                       if ((command & 0x1fffff) % 4) {
+                               DRM_ERROR("CP DMA command requires dw count alignment\n");
+                               return -EINVAL;
+                       }
+               }
+               if (command & PACKET3_CP_DMA_CMD_SAS) {
+                       /* src address space is register */
+                       if (((info & 0x60000000) >> 29) == 0) {
+                               start_reg = idx_value << 2;
+                               if (command & PACKET3_CP_DMA_CMD_SAIC) {
+                                       reg = start_reg;
+                                       if (!evergreen_vm_reg_valid(reg)) {
+                                               DRM_ERROR("CP DMA Bad SRC register\n");
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       for (i = 0; i < (command & 0x1fffff); i++) {
+                                               reg = start_reg + (4 * i);
+                                               if (!evergreen_vm_reg_valid(reg)) {
+                                                       DRM_ERROR("CP DMA Bad SRC register\n");
+                                                       return -EINVAL;
+                                               }
+                                       }
+                               }
+                       }
+               }
+               if (command & PACKET3_CP_DMA_CMD_DAS) {
+                       /* dst address space is register */
+                       if (((info & 0x00300000) >> 20) == 0) {
+                               start_reg = ib[idx + 2];
+                               if (command & PACKET3_CP_DMA_CMD_DAIC) {
+                                       reg = start_reg;
+                                       if (!evergreen_vm_reg_valid(reg)) {
+                                               DRM_ERROR("CP DMA Bad DST register\n");
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       for (i = 0; i < (command & 0x1fffff); i++) {
+                                               reg = start_reg + (4 * i);
+                                               if (!evergreen_vm_reg_valid(reg)) {
+                                                       DRM_ERROR("CP DMA Bad DST register\n");
+                                                       return -EINVAL;
+                                               }
+                                       }
+                               }
+                       }
+               }
+               break;
        default:
                return -EINVAL;
        }
@@ -2955,3 +3580,114 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
 
        return ret;
 }
+
+/**
+ * evergreen_dma_ib_parse() - parse the DMA IB for VM
+ * @rdev: radeon_device pointer
+ * @ib:        radeon_ib pointer
+ *
+ * Parses the DMA IB from the VM CS ioctl
+ * checks for errors. (Cayman-SI)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+       u32 idx = 0;
+       u32 header, cmd, count, tiled, new_cmd, misc;
+
+       do {
+               header = ib->ptr[idx];
+               cmd = GET_DMA_CMD(header);
+               count = GET_DMA_COUNT(header);
+               tiled = GET_DMA_T(header);
+               new_cmd = GET_DMA_NEW(header);
+               misc = GET_DMA_MISC(header);
+
+               switch (cmd) {
+               case DMA_PACKET_WRITE:
+                       if (tiled)
+                               idx += count + 7;
+                       else
+                               idx += count + 3;
+                       break;
+               case DMA_PACKET_COPY:
+                       if (tiled) {
+                               if (new_cmd) {
+                                       switch (misc) {
+                                       case 0:
+                                               /* L2T, frame to fields */
+                                               idx += 10;
+                                               break;
+                                       case 1:
+                                               /* L2T, T2L partial */
+                                               idx += 12;
+                                               break;
+                                       case 3:
+                                               /* L2T, broadcast */
+                                               idx += 10;
+                                               break;
+                                       case 4:
+                                               /* L2T, T2L */
+                                               idx += 9;
+                                               break;
+                                       case 5:
+                                               /* T2T partial */
+                                               idx += 13;
+                                               break;
+                                       case 7:
+                                               /* L2T, broadcast */
+                                               idx += 10;
+                                               break;
+                                       default:
+                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       switch (misc) {
+                                       case 0:
+                                               idx += 9;
+                                               break;
+                                       default:
+                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+                                               return -EINVAL;
+                                       }
+                               }
+                       } else {
+                               if (new_cmd) {
+                                       switch (misc) {
+                                       case 0:
+                                               /* L2L, byte */
+                                               idx += 5;
+                                               break;
+                                       case 1:
+                                               /* L2L, partial */
+                                               idx += 9;
+                                               break;
+                                       case 4:
+                                               /* L2L, dw, broadcast */
+                                               idx += 7;
+                                               break;
+                                       default:
+                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       /* L2L, dw */
+                                       idx += 5;
+                               }
+                       }
+                       break;
+               case DMA_PACKET_CONSTANT_FILL:
+                       idx += 4;
+                       break;
+               case DMA_PACKET_NOP:
+                       idx += 1;
+                       break;
+               default:
+                       DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+                       return -EINVAL;
+               }
+       } while (idx < ib->length_dw);
+
+       return 0;
+}
index df542f1..cb9baaa 100644 (file)
@@ -45,6 +45,8 @@
 #define TURKS_GB_ADDR_CONFIG_GOLDEN          0x02010002
 #define CEDAR_GB_ADDR_CONFIG_GOLDEN          0x02010001
 #define CAICOS_GB_ADDR_CONFIG_GOLDEN         0x02010001
+#define SUMO_GB_ADDR_CONFIG_GOLDEN           0x02010002
+#define SUMO2_GB_ADDR_CONFIG_GOLDEN          0x02010002
 
 /* Registers */
 
 #define                FB_READ_EN                                      (1 << 0)
 #define                FB_WRITE_EN                                     (1 << 1)
 
+#define        CP_STRMOUT_CNTL                                 0x84FC
+
+#define        CP_COHER_CNTL                                   0x85F0
+#define        CP_COHER_SIZE                                   0x85F4
 #define        CP_COHER_BASE                                   0x85F8
 #define        CP_STALLED_STAT1                        0x8674
 #define        CP_STALLED_STAT2                        0x8678
 #       define AFMT_MPEG_INFO_UPDATE         (1 << 10)
 #define AFMT_GENERIC0_7                      0x7138
 
+/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x5f84 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x5f88 /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x5f8c /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x5f90 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x5f94 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x5f98 /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x5f9c /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x5fa0 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x5fa4 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x5fa8 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x5fac /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x5fb0 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x5fb4 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x5fb8 /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL                               0x5e78
+#       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
+#       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE                 (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE                 (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE                 (1 << 7)
+#       define PIN0_UNSOLICITED_RESPONSE_ENABLE           (1 << 8)
+#       define PIN1_UNSOLICITED_RESPONSE_ENABLE           (1 << 9)
+#       define PIN2_UNSOLICITED_RESPONSE_ENABLE           (1 << 10)
+#       define PIN3_UNSOLICITED_RESPONSE_ENABLE           (1 << 11)
+#       define CODEC_HOT_PLUG_ENABLE                      (1 << 12)
+#       define PIN0_AUDIO_ENABLED                         (1 << 24)
+#       define PIN1_AUDIO_ENABLED                         (1 << 25)
+#       define PIN2_AUDIO_ENABLED                         (1 << 26)
+#       define PIN3_AUDIO_ENABLED                         (1 << 27)
+#       define AUDIO_ENABLED                              (1 << 31)
+
+
 #define        GC_USER_SHADER_PIPE_CONFIG                      0x8954
 #define                INACTIVE_QD_PIPES(x)                            ((x) << 8)
 #define                INACTIVE_QD_PIPES_MASK                          0x0000FF00
 #define                PAGE_TABLE_DEPTH(x)                             (((x) & 3) << 1)
 #define                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 4)
 #define VM_CONTEXT1_CNTL                               0x1414
+#define VM_CONTEXT1_CNTL2                              0x1434
 #define        VM_CONTEXT0_PAGE_TABLE_BASE_ADDR                0x153C
 #define        VM_CONTEXT0_PAGE_TABLE_END_ADDR                 0x157C
 #define        VM_CONTEXT0_PAGE_TABLE_START_ADDR               0x155C
 #define                CACHE_UPDATE_MODE(x)                            ((x) << 6)
 #define        VM_L2_STATUS                                    0x140C
 #define                L2_BUSY                                         (1 << 0)
+#define        VM_CONTEXT1_PROTECTION_FAULT_ADDR               0x14FC
+#define        VM_CONTEXT1_PROTECTION_FAULT_STATUS             0x14DC
 
 #define        WAIT_UNTIL                                      0x8040
 
 #       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
 #       define DC_HPDx_EN                                 (1 << 28)
 
+/* ASYNC DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_TILING_CONFIG                                0xD0B8
+
+#define CAYMAN_DMA1_CNTL                                  0xd82c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)       ((((cmd) & 0xF) << 28) |        \
+                                        (((t) & 0x1) << 23) |          \
+                                        (((s) & 0x1) << 22) |          \
+                                        (((n) & 0xFFFFF) << 0))
+/* async DMA Packet types */
+#define        DMA_PACKET_WRITE                                  0x2
+#define        DMA_PACKET_COPY                                   0x3
+#define        DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define        DMA_PACKET_SEMAPHORE                              0x5
+#define        DMA_PACKET_FENCE                                  0x6
+#define        DMA_PACKET_TRAP                                   0x7
+#define        DMA_PACKET_SRBM_WRITE                             0x9
+#define        DMA_PACKET_CONSTANT_FILL                          0xd
+#define        DMA_PACKET_NOP                                    0xf
+
 /* PCIE link stuff */
 #define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
 #define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
 #define        PACKET3_WAIT_REG_MEM                            0x3C
 #define        PACKET3_MEM_WRITE                               0x3D
 #define        PACKET3_INDIRECT_BUFFER                         0x32
+#define        PACKET3_CP_DMA                                  0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - SRC_ADDR
+                * 1 - GDS
+                */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+                * 1 - PFP
+                */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+                * 1 - GDS
+                * 2 - DATA
+                */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
index 81e6a56..39e8be1 100644 (file)
@@ -611,6 +611,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
 
        tmp = gb_addr_config & NUM_PIPES_MASK;
        tmp = r6xx_remap_render_backend(rdev, tmp,
@@ -784,10 +786,20 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
        /* enable context1-7 */
        WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(rdev->dummy_page.addr >> 12));
-       WREG32(VM_CONTEXT1_CNTL2, 0);
-       WREG32(VM_CONTEXT1_CNTL, 0);
+       WREG32(VM_CONTEXT1_CNTL2, 4);
        WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
-                               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+                               RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
        cayman_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -905,6 +917,7 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
                WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
                WREG32(SCRATCH_UMSK, 0);
+               rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        }
 }
 
@@ -1118,6 +1131,181 @@ static int cayman_cp_resume(struct radeon_device *rdev)
        return 0;
 }
 
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+                               struct radeon_ib *ib)
+{
+       struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+       if (rdev->wb.enabled) {
+               u32 next_rptr = ring->wptr + 4;
+               while ((next_rptr & 7) != 5)
+                       next_rptr++;
+               next_rptr += 3;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+               radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+               radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+               radeon_ring_write(ring, next_rptr);
+       }
+
+       /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+        * Pad as necessary with NOPs.
+        */
+       while ((ring->wptr & 7) != 5)
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+       radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+       radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+       u32 rb_cntl;
+
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+       /* dma0 */
+       rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+       rb_cntl &= ~DMA_RB_ENABLE;
+       WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+       /* dma1 */
+       rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+       rb_cntl &= ~DMA_RB_ENABLE;
+       WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+       rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+       rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them. (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+       struct radeon_ring *ring;
+       u32 rb_cntl, dma_cntl;
+       u32 rb_bufsz;
+       u32 reg_offset, wb_offset;
+       int i, r;
+
+       /* Reset dma */
+       WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+       RREG32(SRBM_SOFT_RESET);
+       udelay(50);
+       WREG32(SRBM_SOFT_RESET, 0);
+
+       for (i = 0; i < 2; i++) {
+               if (i == 0) {
+                       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+                       reg_offset = DMA0_REGISTER_OFFSET;
+                       wb_offset = R600_WB_DMA_RPTR_OFFSET;
+               } else {
+                       ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+                       reg_offset = DMA1_REGISTER_OFFSET;
+                       wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+               }
+
+               WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+               WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+               /* Set ring buffer size in dwords */
+               rb_bufsz = drm_order(ring->ring_size / 4);
+               rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+               rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+               WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+               /* Initialize the ring buffer's read and write pointers */
+               WREG32(DMA_RB_RPTR + reg_offset, 0);
+               WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+               /* set the wb address whether it's enabled or not */
+               WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+                      upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+               WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+                      ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+               if (rdev->wb.enabled)
+                       rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+               WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+               /* enable DMA IBs */
+               WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE);
+
+               dma_cntl = RREG32(DMA_CNTL + reg_offset);
+               dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+               WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+               ring->wptr = 0;
+               WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+               ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+               WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+               ring->ready = true;
+
+               r = radeon_ring_test(rdev, ring->idx, ring);
+               if (r) {
+                       ring->ready = false;
+                       return r;
+               }
+       }
+
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+       return 0;
+}
+
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+       cayman_dma_stop(rdev);
+       radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+       radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
 static int cayman_gpu_soft_reset(struct radeon_device *rdev)
 {
        struct evergreen_mc_save save;
@@ -1208,6 +1396,32 @@ int cayman_asic_reset(struct radeon_device *rdev)
        return cayman_gpu_soft_reset(rdev);
 }
 
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (cayman-SI).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       u32 dma_status_reg;
+
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+       else
+               dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+       if (dma_status_reg & DMA_IDLE) {
+               radeon_ring_lockup_update(ring);
+               return false;
+       }
+       /* force ring activities */
+       radeon_ring_force_activity(rdev, ring);
+       return radeon_ring_test_lockup(rdev, ring);
+}
+
 static int cayman_startup(struct radeon_device *rdev)
 {
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -1289,6 +1503,18 @@ static int cayman_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
+       r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -1303,6 +1529,23 @@ static int cayman_startup(struct radeon_device *rdev)
                             0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
+
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+                            DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+                            DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       if (r)
+               return r;
+
+       ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+                            DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+                            DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       if (r)
+               return r;
+
        r = cayman_cp_load_microcode(rdev);
        if (r)
                return r;
@@ -1310,6 +1553,10 @@ static int cayman_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = cayman_dma_resume(rdev);
+       if (r)
+               return r;
+
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1354,7 +1601,7 @@ int cayman_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
        cayman_cp_enable(rdev, false);
-       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+       cayman_dma_stop(rdev);
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        cayman_pcie_gart_disable(rdev);
@@ -1421,6 +1668,14 @@ int cayman_init(struct radeon_device *rdev)
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 1024 * 1024);
 
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       ring->ring_obj = NULL;
+       r600_ring_init(rdev, ring, 64 * 1024);
+
+       ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+       ring->ring_obj = NULL;
+       r600_ring_init(rdev, ring, 64 * 1024);
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -1433,6 +1688,7 @@ int cayman_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                cayman_cp_fini(rdev);
+               cayman_dma_fini(rdev);
                r600_irq_fini(rdev);
                if (rdev->flags & RADEON_IS_IGP)
                        si_rlc_fini(rdev);
@@ -1463,6 +1719,7 @@ void cayman_fini(struct radeon_device *rdev)
 {
        r600_blit_fini(rdev);
        cayman_cp_fini(rdev);
+       cayman_dma_fini(rdev);
        r600_irq_fini(rdev);
        if (rdev->flags & RADEON_IS_IGP)
                si_rlc_fini(rdev);
@@ -1538,30 +1795,57 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
        struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
        uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
-       while (count) {
-               unsigned ndw = 1 + count * 2;
-               if (ndw > 0x3FFF)
-                       ndw = 0x3FFF;
-
-               radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
-               radeon_ring_write(ring, pe);
-               radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
-               for (; ndw > 1; ndw -= 2, --count, pe += 8) {
-                       uint64_t value = 0;
-                       if (flags & RADEON_VM_PAGE_SYSTEM) {
-                               value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
+       uint64_t value;
+       unsigned ndw;
+
+       if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+               while (count) {
+                       ndw = 1 + count * 2;
+                       if (ndw > 0x3FFF)
+                               ndw = 0x3FFF;
+
+                       radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+                       radeon_ring_write(ring, pe);
+                       radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+                       for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+                               if (flags & RADEON_VM_PAGE_SYSTEM) {
+                                       value = radeon_vm_map_gart(rdev, addr);
+                                       value &= 0xFFFFFFFFFFFFF000ULL;
+                               } else if (flags & RADEON_VM_PAGE_VALID) {
+                                       value = addr;
+                               } else {
+                                       value = 0;
+                               }
                                addr += incr;
-
-                       } else if (flags & RADEON_VM_PAGE_VALID) {
-                               value = addr;
+                               value |= r600_flags;
+                               radeon_ring_write(ring, value);
+                               radeon_ring_write(ring, upper_32_bits(value));
+                       }
+               }
+       } else {
+               while (count) {
+                       ndw = count * 2;
+                       if (ndw > 0xFFFFE)
+                               ndw = 0xFFFFE;
+
+                       /* for non-physically contiguous pages (system) */
+                       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
+                       radeon_ring_write(ring, pe);
+                       radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+                       for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+                               if (flags & RADEON_VM_PAGE_SYSTEM) {
+                                       value = radeon_vm_map_gart(rdev, addr);
+                                       value &= 0xFFFFFFFFFFFFF000ULL;
+                               } else if (flags & RADEON_VM_PAGE_VALID) {
+                                       value = addr;
+                               } else {
+                                       value = 0;
+                               }
                                addr += incr;
+                               value |= r600_flags;
+                               radeon_ring_write(ring, value);
+                               radeon_ring_write(ring, upper_32_bits(value));
                        }
-
-                       value |= r600_flags;
-                       radeon_ring_write(ring, value);
-                       radeon_ring_write(ring, upper_32_bits(value));
                }
        }
 }
@@ -1596,3 +1880,26 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
        radeon_ring_write(ring, 0x0);
 }
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+       struct radeon_ring *ring = &rdev->ring[ridx];
+
+       if (vm == NULL)
+               return;
+
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+       radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+       radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+       /* flush hdp cache */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+       radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+       radeon_ring_write(ring, 1);
+
+       /* bits 0-7 are the VM contexts0-7 */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+       radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+       radeon_ring_write(ring, 1 << vm->id);
+}
+
index cbef681..b93186b 100644 (file)
 #define                VMID(x)                                         (((x) & 0x7) << 0)
 #define        SRBM_STATUS                                     0x0E50
 
+#define        SRBM_SOFT_RESET                                 0x0E60
+#define                SOFT_RESET_BIF                          (1 << 1)
+#define                SOFT_RESET_CG                           (1 << 2)
+#define                SOFT_RESET_DC                           (1 << 5)
+#define                SOFT_RESET_DMA1                         (1 << 6)
+#define                SOFT_RESET_GRBM                         (1 << 8)
+#define                SOFT_RESET_HDP                          (1 << 9)
+#define                SOFT_RESET_IH                           (1 << 10)
+#define                SOFT_RESET_MC                           (1 << 11)
+#define                SOFT_RESET_RLC                          (1 << 13)
+#define                SOFT_RESET_ROM                          (1 << 14)
+#define                SOFT_RESET_SEM                          (1 << 15)
+#define                SOFT_RESET_VMC                          (1 << 17)
+#define                SOFT_RESET_DMA                          (1 << 20)
+#define                SOFT_RESET_TST                          (1 << 21)
+#define                SOFT_RESET_REGBB                        (1 << 22)
+#define                SOFT_RESET_ORB                          (1 << 23)
+
 #define VM_CONTEXT0_REQUEST_RESPONSE                   0x1470
 #define                REQUEST_TYPE(x)                                 (((x) & 0xf) << 0)
 #define                RESPONSE_TYPE_MASK                              0x000000F0
 #define VM_CONTEXT0_CNTL                               0x1410
 #define                ENABLE_CONTEXT                                  (1 << 0)
 #define                PAGE_TABLE_DEPTH(x)                             (((x) & 3) << 1)
+#define                RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 3)
 #define                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 4)
+#define                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT    (1 << 6)
+#define                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT      (1 << 7)
+#define                PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT          (1 << 9)
+#define                PDE0_PROTECTION_FAULT_ENABLE_DEFAULT            (1 << 10)
+#define                VALID_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 12)
+#define                VALID_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 13)
+#define                READ_PROTECTION_FAULT_ENABLE_INTERRUPT          (1 << 15)
+#define                READ_PROTECTION_FAULT_ENABLE_DEFAULT            (1 << 16)
+#define                WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 18)
+#define                WRITE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 19)
 #define VM_CONTEXT1_CNTL                               0x1414
 #define VM_CONTEXT0_CNTL2                              0x1430
 #define VM_CONTEXT1_CNTL2                              0x1434
 #define        PACKET3_SET_APPEND_CNT                          0x75
 #define        PACKET3_ME_WRITE                                0x7A
 
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x800 /* not a register */
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#       define CMD_VMID_FORCE                             (1 << 31)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0xd048
+#define DMA_TILING_CONFIG                                0xd0b8
+#define DMA_MODE                                          0xd0bc
+
+#define DMA_PACKET(cmd, t, s, n)       ((((cmd) & 0xF) << 28) |        \
+                                        (((t) & 0x1) << 23) |          \
+                                        (((s) & 0x1) << 22) |          \
+                                        (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)    ((((cmd) & 0xF) << 28) |        \
+                                        (((vmid) & 0xF) << 20) |       \
+                                        (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define        DMA_PACKET_WRITE                                  0x2
+#define        DMA_PACKET_COPY                                   0x3
+#define        DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define        DMA_PACKET_SEMAPHORE                              0x5
+#define        DMA_PACKET_FENCE                                  0x6
+#define        DMA_PACKET_TRAP                                   0x7
+#define        DMA_PACKET_SRBM_WRITE                             0x9
+#define        DMA_PACKET_CONSTANT_FILL                          0xd
+#define        DMA_PACKET_NOP                                    0xf
+
 #endif
 
index 376884f..8ff7cac 100644 (file)
@@ -4135,23 +4135,36 @@ int r100_init(struct radeon_device *rdev)
        return 0;
 }
 
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+                     bool always_indirect)
 {
-       if (reg < rdev->rmmio_size)
+       if (reg < rdev->rmmio_size && !always_indirect)
                return readl(((void __iomem *)rdev->rmmio) + reg);
        else {
+               unsigned long flags;
+               uint32_t ret;
+
+               spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
                writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
-               return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+               ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+               spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+               return ret;
        }
 }
 
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+                 bool always_indirect)
 {
-       if (reg < rdev->rmmio_size)
+       if (reg < rdev->rmmio_size && !always_indirect)
                writel(v, ((void __iomem *)rdev->rmmio) + reg);
        else {
+               unsigned long flags;
+
+               spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
                writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
                writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+               spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
        }
 }
 
index 169ecc9..2aaf147 100644 (file)
@@ -1370,6 +1370,29 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
        return radeon_ring_test_lockup(rdev, ring);
 }
 
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       u32 dma_status_reg;
+
+       dma_status_reg = RREG32(DMA_STATUS_REG);
+       if (dma_status_reg & DMA_IDLE) {
+               radeon_ring_lockup_update(ring);
+               return false;
+       }
+       /* force ring activities */
+       radeon_ring_force_activity(rdev, ring);
+       return radeon_ring_test_lockup(rdev, ring);
+}
+
 int r600_asic_reset(struct radeon_device *rdev)
 {
        return r600_gpu_soft_reset(rdev);
@@ -1588,6 +1611,7 @@ static void r600_gpu_init(struct radeon_device *rdev)
        WREG32(GB_TILING_CONFIG, tiling_config);
        WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
        WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
+       WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
 
        tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
        WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -1865,6 +1889,7 @@ void r600_cp_stop(struct radeon_device *rdev)
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
        WREG32(SCRATCH_UMSK, 0);
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 }
 
 int r600_init_microcode(struct radeon_device *rdev)
@@ -2190,6 +2215,128 @@ void r600_cp_fini(struct radeon_device *rdev)
        radeon_scratch_free(rdev, ring->rptr_save_reg);
 }
 
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ */
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+       u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+       rb_cntl &= ~DMA_RB_ENABLE;
+       WREG32(DMA_RB_CNTL, rb_cntl);
+
+       rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+       struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       u32 rb_cntl, dma_cntl;
+       u32 rb_bufsz;
+       int r;
+
+       /* Reset dma */
+       if (rdev->family >= CHIP_RV770)
+               WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+       else
+               WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+       RREG32(SRBM_SOFT_RESET);
+       udelay(50);
+       WREG32(SRBM_SOFT_RESET, 0);
+
+       WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+       WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+       /* Set ring buffer size in dwords */
+       rb_bufsz = drm_order(ring->ring_size / 4);
+       rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+       rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+       WREG32(DMA_RB_CNTL, rb_cntl);
+
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32(DMA_RB_RPTR, 0);
+       WREG32(DMA_RB_WPTR, 0);
+
+       /* set the wb address whether it's enabled or not */
+       WREG32(DMA_RB_RPTR_ADDR_HI,
+              upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+       WREG32(DMA_RB_RPTR_ADDR_LO,
+              ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+       if (rdev->wb.enabled)
+               rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+       WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+       /* enable DMA IBs */
+       WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
+
+       dma_cntl = RREG32(DMA_CNTL);
+       dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+       WREG32(DMA_CNTL, dma_cntl);
+
+       if (rdev->family >= CHIP_RV770)
+               WREG32(DMA_MODE, 1);
+
+       ring->wptr = 0;
+       WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+       ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+       WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+       ring->ready = true;
+
+       r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+       if (r) {
+               ring->ready = false;
+               return r;
+       }
+
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+       return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+       r600_dma_stop(rdev);
+       radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
 
 /*
  * GPU scratch registers helpers function.
@@ -2246,6 +2393,64 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        return r;
 }
 
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+                      struct radeon_ring *ring)
+{
+       unsigned i;
+       int r;
+       void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+       u32 tmp;
+
+       if (!ptr) {
+               DRM_ERROR("invalid vram scratch pointer\n");
+               return -EINVAL;
+       }
+
+       tmp = 0xCAFEDEAD;
+       writel(tmp, ptr);
+
+       r = radeon_ring_lock(rdev, ring, 4);
+       if (r) {
+               DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+               return r;
+       }
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+       radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+       radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+       radeon_ring_write(ring, 0xDEADBEEF);
+       radeon_ring_unlock_commit(rdev, ring);
+
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = readl(ptr);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < rdev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+       } else {
+               DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+                         ring->idx, tmp);
+               r = -EINVAL;
+       }
+       return r;
+}
+
+/*
+ * CP fences/semaphores
+ */
+
 void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
@@ -2309,6 +2514,59 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 }
 
+/*
+ * DMA fences/semaphores
+ */
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+                             struct radeon_fence *fence)
+{
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+       u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+       /* write the fence */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+       radeon_ring_write(ring, addr & 0xfffffffc);
+       radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+       radeon_ring_write(ring, lower_32_bits(fence->seq));
+       /* generate an interrupt */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+                                 struct radeon_ring *ring,
+                                 struct radeon_semaphore *semaphore,
+                                 bool emit_wait)
+{
+       u64 addr = semaphore->gpu_addr;
+       u32 s = emit_wait ? 0 : 1;
+
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+       radeon_ring_write(ring, addr & 0xfffffffc);
+       radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
@@ -2328,6 +2586,80 @@ int r600_copy_blit(struct radeon_device *rdev,
        return 0;
 }
 
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (r6xx-r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+                 uint64_t src_offset, uint64_t dst_offset,
+                 unsigned num_gpu_pages,
+                 struct radeon_fence **fence)
+{
+       struct radeon_semaphore *sem = NULL;
+       int ring_index = rdev->asic->copy.dma_ring_index;
+       struct radeon_ring *ring = &rdev->ring[ring_index];
+       u32 size_in_dw, cur_size_in_dw;
+       int i, num_loops;
+       int r = 0;
+
+       r = radeon_semaphore_create(rdev, &sem);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               return r;
+       }
+
+       size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+       num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
+       r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               radeon_semaphore_free(rdev, &sem, NULL);
+               return r;
+       }
+
+       if (radeon_fence_need_sync(*fence, ring->idx)) {
+               radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+                                           ring->idx);
+               radeon_fence_note_sync(*fence, ring->idx);
+       } else {
+               radeon_semaphore_free(rdev, &sem, NULL);
+       }
+
+       for (i = 0; i < num_loops; i++) {
+               cur_size_in_dw = size_in_dw;
+               if (cur_size_in_dw > 0xFFFF)
+                       cur_size_in_dw = 0xFFFF;
+               size_in_dw -= cur_size_in_dw;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+               radeon_ring_write(ring, dst_offset & 0xfffffffc);
+               radeon_ring_write(ring, src_offset & 0xfffffffc);
+               radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+               radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+               src_offset += cur_size_in_dw * 4;
+               dst_offset += cur_size_in_dw * 4;
+       }
+
+       r = radeon_fence_emit(rdev, fence, ring->idx);
+       if (r) {
+               radeon_ring_unlock_undo(rdev, ring);
+               return r;
+       }
+
+       radeon_ring_unlock_commit(rdev, ring);
+       radeon_semaphore_free(rdev, &sem, *fence);
+
+       return r;
+}
+
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size)
@@ -2343,7 +2675,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
 
 static int r600_startup(struct radeon_device *rdev)
 {
-       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       struct radeon_ring *ring;
        int r;
 
        /* enable pcie gen2 link */
@@ -2388,6 +2720,12 @@ static int r600_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -2397,12 +2735,20 @@ static int r600_startup(struct radeon_device *rdev)
        }
        r600_irq_set(rdev);
 
+       ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
                             R600_CP_RB_RPTR, R600_CP_RB_WPTR,
                             0, 0xfffff, RADEON_CP_PACKET2);
+       if (r)
+               return r;
 
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+                            DMA_RB_RPTR, DMA_RB_WPTR,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
        if (r)
                return r;
+
        r = r600_cp_load_microcode(rdev);
        if (r)
                return r;
@@ -2410,6 +2756,10 @@ static int r600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_dma_resume(rdev);
+       if (r)
+               return r;
+
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2465,7 +2815,7 @@ int r600_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
        r600_cp_stop(rdev);
-       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+       r600_dma_stop(rdev);
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        r600_pcie_gart_disable(rdev);
@@ -2538,6 +2888,9 @@ int r600_init(struct radeon_device *rdev)
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+       rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -2550,6 +2903,7 @@ int r600_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r600_cp_fini(rdev);
+               r600_dma_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_ib_pool_fini(rdev);
@@ -2566,6 +2920,7 @@ void r600_fini(struct radeon_device *rdev)
        r600_audio_fini(rdev);
        r600_blit_fini(rdev);
        r600_cp_fini(rdev);
+       r600_dma_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
@@ -2668,6 +3023,104 @@ free_scratch:
        return r;
 }
 
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       struct radeon_ib ib;
+       unsigned i;
+       int r;
+       void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+       u32 tmp = 0;
+
+       if (!ptr) {
+               DRM_ERROR("invalid vram scratch pointer\n");
+               return -EINVAL;
+       }
+
+       tmp = 0xCAFEDEAD;
+       writel(tmp, ptr);
+
+       r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+       if (r) {
+               DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+               return r;
+       }
+
+       ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+       ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+       ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+       ib.ptr[3] = 0xDEADBEEF;
+       ib.length_dw = 4;
+
+       r = radeon_ib_schedule(rdev, &ib, NULL);
+       if (r) {
+               radeon_ib_free(rdev, &ib);
+               DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+               return r;
+       }
+       r = radeon_fence_wait(ib.fence, false);
+       if (r) {
+               DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+               return r;
+       }
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = readl(ptr);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+       if (i < rdev->usec_timeout) {
+               DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+       } else {
+               DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+               r = -EINVAL;
+       }
+       radeon_ib_free(rdev, &ib);
+       return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+       struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+       if (rdev->wb.enabled) {
+               u32 next_rptr = ring->wptr + 4;
+               while ((next_rptr & 7) != 5)
+                       next_rptr++;
+               next_rptr += 3;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+               radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+               radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+               radeon_ring_write(ring, next_rptr);
+       }
+
+       /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+        * Pad as necessary with NOPs.
+        */
+       while ((ring->wptr & 7) != 5)
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+       radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+       radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
 /*
  * Interrupts
  *
@@ -2859,6 +3312,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
        u32 tmp;
 
        WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+       tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+       WREG32(DMA_CNTL, tmp);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(DxMODE_INT_MASK, 0);
        WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
@@ -3000,6 +3455,7 @@ int r600_irq_set(struct radeon_device *rdev)
        u32 grbm_int_cntl = 0;
        u32 hdmi0, hdmi1;
        u32 d1grph = 0, d2grph = 0;
+       u32 dma_cntl;
 
        if (!rdev->irq.installed) {
                WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3034,12 +3490,19 @@ int r600_irq_set(struct radeon_device *rdev)
                hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
                hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
        }
+       dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
 
        if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
                DRM_DEBUG("r600_irq_set: sw int\n");
                cp_int_cntl |= RB_INT_ENABLE;
                cp_int_cntl |= TIME_STAMP_INT_ENABLE;
        }
+
+       if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+               DRM_DEBUG("r600_irq_set: sw int dma\n");
+               dma_cntl |= TRAP_ENABLE;
+       }
+
        if (rdev->irq.crtc_vblank_int[0] ||
            atomic_read(&rdev->irq.pflip[0])) {
                DRM_DEBUG("r600_irq_set: vblank 0\n");
@@ -3084,6 +3547,7 @@ int r600_irq_set(struct radeon_device *rdev)
        }
 
        WREG32(CP_INT_CNTL, cp_int_cntl);
+       WREG32(DMA_CNTL, dma_cntl);
        WREG32(DxMODE_INT_MASK, mode_int);
        WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
        WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
@@ -3463,6 +3927,10 @@ restart_ih:
                        DRM_DEBUG("IH: CP EOP\n");
                        radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
+               case 224: /* DMA trap event */
+                       DRM_DEBUG("IH: DMA trap\n");
+                       radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+                       break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: GUI idle\n");
                        break;
index 211c402..0be768b 100644 (file)
@@ -657,87 +657,30 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
                        /* nby is npipes htiles aligned == npipes * 8 pixel aligned */
                        nby = round_up(nby, track->npipes * 8);
                } else {
-                       /* htile widht & nby (8 or 4) make 2 bits number */
-                       tmp = track->htile_surface & 3;
+                       /* always assume 8x8 htile */
                        /* align is htile align * 8, htile align vary according to
                         * number of pipe and tile width and nby
                         */
                        switch (track->npipes) {
                        case 8:
-                               switch (tmp) {
-                               case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-                                       nbx = round_up(nbx, 64 * 8);
-                                       nby = round_up(nby, 64 * 8);
-                                       break;
-                               case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-                               case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 64 * 8);
-                                       nby = round_up(nby, 32 * 8);
-                                       break;
-                               case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 32 * 8);
-                                       nby = round_up(nby, 32 * 8);
-                                       break;
-                               default:
-                                       return -EINVAL;
-                               }
+                               /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+                               nbx = round_up(nbx, 64 * 8);
+                               nby = round_up(nby, 64 * 8);
                                break;
                        case 4:
-                               switch (tmp) {
-                               case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-                                       nbx = round_up(nbx, 64 * 8);
-                                       nby = round_up(nby, 32 * 8);
-                                       break;
-                               case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-                               case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 32 * 8);
-                                       nby = round_up(nby, 32 * 8);
-                                       break;
-                               case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 32 * 8);
-                                       nby = round_up(nby, 16 * 8);
-                                       break;
-                               default:
-                                       return -EINVAL;
-                               }
+                               /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+                               nbx = round_up(nbx, 64 * 8);
+                               nby = round_up(nby, 32 * 8);
                                break;
                        case 2:
-                               switch (tmp) {
-                               case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-                                       nbx = round_up(nbx, 32 * 8);
-                                       nby = round_up(nby, 32 * 8);
-                                       break;
-                               case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-                               case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 32 * 8);
-                                       nby = round_up(nby, 16 * 8);
-                                       break;
-                               case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 16 * 8);
-                                       nby = round_up(nby, 16 * 8);
-                                       break;
-                               default:
-                                       return -EINVAL;
-                               }
+                               /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+                               nbx = round_up(nbx, 32 * 8);
+                               nby = round_up(nby, 32 * 8);
                                break;
                        case 1:
-                               switch (tmp) {
-                               case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-                                       nbx = round_up(nbx, 32 * 8);
-                                       nby = round_up(nby, 16 * 8);
-                                       break;
-                               case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-                               case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 16 * 8);
-                                       nby = round_up(nby, 16 * 8);
-                                       break;
-                               case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 16 * 8);
-                                       nby = round_up(nby, 8 * 8);
-                                       break;
-                               default:
-                                       return -EINVAL;
-                               }
+                               /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+                               nbx = round_up(nbx, 32 * 8);
+                               nby = round_up(nby, 16 * 8);
                                break;
                        default:
                                dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
@@ -746,9 +689,10 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
                        }
                }
                /* compute number of htile */
-               nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
-               nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
-               size = nbx * nby * 4;
+               nbx = nbx >> 3;
+               nby = nby >> 3;
+               /* size must be aligned on npipes * 2K boundary */
+               size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
                size += track->htile_offset;
 
                if (size > radeon_bo_size(track->htile_bo)) {
@@ -1492,6 +1436,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                break;
        case DB_HTILE_SURFACE:
                track->htile_surface = radeon_get_ib_value(p, idx);
+               /* force 8x8 htile width and height */
+               ib[idx] |= 3;
                track->db_dirty = true;
                break;
        case SQ_PGM_START_FS:
@@ -1949,6 +1895,78 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        ib[idx+2] = upper_32_bits(offset) & 0xff;
                }
                break;
+       case PACKET3_CP_DMA:
+       {
+               u32 command, size;
+               u64 offset, tmp;
+               if (pkt->count != 4) {
+                       DRM_ERROR("bad CP DMA\n");
+                       return -EINVAL;
+               }
+               command = radeon_get_ib_value(p, idx+4);
+               size = command & 0x1fffff;
+               if (command & PACKET3_CP_DMA_CMD_SAS) {
+                       /* src address space is register */
+                       DRM_ERROR("CP DMA SAS not supported\n");
+                       return -EINVAL;
+               } else {
+                       if (command & PACKET3_CP_DMA_CMD_SAIC) {
+                               DRM_ERROR("CP DMA SAIC only supported for registers\n");
+                               return -EINVAL;
+                       }
+                       /* src address space is memory */
+                       r = r600_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("bad CP DMA SRC\n");
+                               return -EINVAL;
+                       }
+
+                       tmp = radeon_get_ib_value(p, idx) +
+                               ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+                       offset = reloc->lobj.gpu_offset + tmp;
+
+                       if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+                               dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+                                        tmp + size, radeon_bo_size(reloc->robj));
+                               return -EINVAL;
+                       }
+
+                       ib[idx] = offset;
+                       ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+               }
+               if (command & PACKET3_CP_DMA_CMD_DAS) {
+                       /* dst address space is register */
+                       DRM_ERROR("CP DMA DAS not supported\n");
+                       return -EINVAL;
+               } else {
+                       /* dst address space is memory */
+                       if (command & PACKET3_CP_DMA_CMD_DAIC) {
+                               DRM_ERROR("CP DMA DAIC only supported for registers\n");
+                               return -EINVAL;
+                       }
+                       r = r600_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("bad CP DMA DST\n");
+                               return -EINVAL;
+                       }
+
+                       tmp = radeon_get_ib_value(p, idx+2) +
+                               ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+                       offset = reloc->lobj.gpu_offset + tmp;
+
+                       if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+                               dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+                                        tmp + size, radeon_bo_size(reloc->robj));
+                               return -EINVAL;
+                       }
+
+                       ib[idx+2] = offset;
+                       ib[idx+3] = upper_32_bits(offset) & 0xff;
+               }
+               break;
+       }
        case PACKET3_SURFACE_SYNC:
                if (pkt->count != 3) {
                        DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2496,3 +2514,196 @@ void r600_cs_legacy_init(void)
 {
        r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
 }
+
+/*
+ *  DMA
+ */
+/**
+ * r600_dma_cs_next_reloc() - parse next reloc
+ * @p:         parser structure holding parsing context.
+ * @cs_reloc:          reloc information
+ *
+ * Return the next reloc, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+                          struct radeon_cs_reloc **cs_reloc)
+{
+       struct radeon_cs_chunk *relocs_chunk;
+       unsigned idx;
+
+       if (p->chunk_relocs_idx == -1) {
+               DRM_ERROR("No relocation chunk !\n");
+               return -EINVAL;
+       }
+       *cs_reloc = NULL;
+       relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+       idx = p->dma_reloc_idx;
+       if (idx >= relocs_chunk->length_dw) {
+               DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+                         idx, relocs_chunk->length_dw);
+               return -EINVAL;
+       }
+       *cs_reloc = p->relocs_ptr[idx];
+       p->dma_reloc_idx++;
+       return 0;
+}
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+
+/**
+ * r600_dma_cs_parse() - parse the DMA IB
+ * @p:         parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (R6xx-R7xx)
+ * Returns 0 for success and an error on failure.
+ **/
+int r600_dma_cs_parse(struct radeon_cs_parser *p)
+{
+       struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+       struct radeon_cs_reloc *src_reloc, *dst_reloc;
+       u32 header, cmd, count, tiled;
+       volatile u32 *ib = p->ib.ptr;
+       u32 idx, idx_value;
+       u64 src_offset, dst_offset;
+       int r;
+
+       do {
+               if (p->idx >= ib_chunk->length_dw) {
+                       DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+                                 p->idx, ib_chunk->length_dw);
+                       return -EINVAL;
+               }
+               idx = p->idx;
+               header = radeon_get_ib_value(p, idx);
+               cmd = GET_DMA_CMD(header);
+               count = GET_DMA_COUNT(header);
+               tiled = GET_DMA_T(header);
+
+               switch (cmd) {
+               case DMA_PACKET_WRITE:
+                       r = r600_dma_cs_next_reloc(p, &dst_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_WRITE\n");
+                               return -EINVAL;
+                       }
+                       if (tiled) {
+                               dst_offset = ib[idx+1];
+                               dst_offset <<= 8;
+
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               p->idx += count + 5;
+                       } else {
+                               dst_offset = ib[idx+1];
+                               dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += count + 3;
+                       }
+                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                               dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                               return -EINVAL;
+                       }
+                       break;
+               case DMA_PACKET_COPY:
+                       r = r600_dma_cs_next_reloc(p, &src_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_COPY\n");
+                               return -EINVAL;
+                       }
+                       r = r600_dma_cs_next_reloc(p, &dst_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_COPY\n");
+                               return -EINVAL;
+                       }
+                       if (tiled) {
+                               idx_value = radeon_get_ib_value(p, idx + 2);
+                               /* detile bit */
+                               if (idx_value & (1 << 31)) {
+                                       /* tiled src, linear dst */
+                                       src_offset = ib[idx+1];
+                                       src_offset <<= 8;
+                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+                                       dst_offset = ib[idx+5];
+                                       dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                       ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               } else {
+                                       /* linear src, tiled dst */
+                                       src_offset = ib[idx+5];
+                                       src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                       ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+                                       dst_offset = ib[idx+1];
+                                       dst_offset <<= 8;
+                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               }
+                               p->idx += 7;
+                       } else {
+                               src_offset = ib[idx+2];
+                               src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+                               dst_offset = ib[idx+1];
+                               dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += 5;
+                       }
+                       if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                               dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+                                        src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                               return -EINVAL;
+                       }
+                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                               dev_warn(p->dev, "DMA copy dst buffer too small (%llu %lu)\n",
+                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                               return -EINVAL;
+                       }
+                       break;
+               case DMA_PACKET_CONSTANT_FILL:
+                       if (p->family < CHIP_RV770) {
+                               DRM_ERROR("Constant Fill is 7xx only !\n");
+                               return -EINVAL;
+                       }
+                       r = r600_dma_cs_next_reloc(p, &dst_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+                               return -EINVAL;
+                       }
+                       dst_offset = ib[idx+1];
+                       dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                               dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                               return -EINVAL;
+                       }
+                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                       ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+                       p->idx += 4;
+                       break;
+               case DMA_PACKET_NOP:
+                       p->idx += 1;
+                       break;
+               default:
+                       DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+                       return -EINVAL;
+               }
+       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+       for (r = 0; r < p->ib.length_dw; r++) {
+               printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
+               mdelay(1);
+       }
+#endif
+       return 0;
+}
index 2b960cb..909219b 100644 (file)
 #define R600_CONFIG_F0_BASE                                     0x542C
 #define R600_CONFIG_APER_SIZE                                   0x5430
 
+#define        R600_BIF_FB_EN                                          0x5490
+#define                R600_FB_READ_EN                                 (1 << 0)
+#define                R600_FB_WRITE_EN                                (1 << 1)
+
+#define R600_CITF_CNTL                                         0x200c
+#define                R600_BLACKOUT_MASK                              0x00000003
+
+#define R700_MC_CITF_CNTL                                      0x25c0
+
 #define R600_ROM_CNTL                              0x1600
 #       define R600_SCK_OVERWRITE                  (1 << 1)
 #       define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
index fa6f370..4a53402 100644 (file)
 #define         WAIT_2D_IDLECLEAN_bit                           (1 << 16)
 #define         WAIT_3D_IDLECLEAN_bit                           (1 << 17)
 
+/* async DMA */
+#define DMA_TILING_CONFIG                                 0x3ec4
+#define DMA_CONFIG                                        0x3e4c
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0xd048
+#define DMA_MODE                                          0xd0bc
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)       ((((cmd) & 0xF) << 28) |        \
+                                        (((t) & 0x1) << 23) |          \
+                                        (((s) & 0x1) << 22) |          \
+                                        (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define        DMA_PACKET_WRITE                                  0x2
+#define        DMA_PACKET_COPY                                   0x3
+#define        DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define        DMA_PACKET_SEMAPHORE                              0x5
+#define        DMA_PACKET_FENCE                                  0x6
+#define        DMA_PACKET_TRAP                                   0x7
+#define        DMA_PACKET_CONSTANT_FILL                          0xd /* 7xx only */
+#define        DMA_PACKET_NOP                                    0xf
+
 #define IH_RB_CNTL                                        0x3e00
 #       define IH_RB_ENABLE                               (1 << 0)
-#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_SIZE(x)                              ((x) << 1) /* log2 */
 #       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
 #       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
 #       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
 #define TN_RLC_CLEAR_STATE_RESTORE_BASE                   0x3f20
 
 #define SRBM_SOFT_RESET                                   0xe60
+#       define SOFT_RESET_DMA                             (1 << 12)
 #       define SOFT_RESET_RLC                             (1 << 13)
+#       define RV770_SOFT_RESET_DMA                       (1 << 20)
 
 #define CP_INT_CNTL                                       0xc124
 #       define CNTX_BUSY_INT_ENABLE                       (1 << 19)
 #define        PACKET3_WAIT_REG_MEM                            0x3C
 #define        PACKET3_MEM_WRITE                               0x3D
 #define        PACKET3_INDIRECT_BUFFER                         0x32
+#define        PACKET3_CP_DMA                                  0x41
+/* 1. header
+ * 2. SRC_ADDR_LO [31:0]
+ * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_TC_ACTION_ENA        (1 << 23)
index 8c42d54..5dc744d 100644 (file)
@@ -109,7 +109,7 @@ extern int radeon_lockup_timeout;
 #define RADEON_BIOS_NUM_SCRATCH                        8
 
 /* max number of rings */
-#define RADEON_NUM_RINGS                       3
+#define RADEON_NUM_RINGS                       5
 
 /* fence seq are set to this number when signaled */
 #define RADEON_FENCE_SIGNALED_SEQ              0LL
@@ -122,6 +122,11 @@ extern int radeon_lockup_timeout;
 #define CAYMAN_RING_TYPE_CP1_INDEX             1
 #define CAYMAN_RING_TYPE_CP2_INDEX             2
 
+/* R600+ has an async dma ring */
+#define R600_RING_TYPE_DMA_INDEX               3
+/* cayman add a second async dma ring */
+#define CAYMAN_RING_TYPE_DMA1_INDEX            4
+
 /* hardcode those limit for now */
 #define RADEON_VA_IB_OFFSET                    (1 << 20)
 #define RADEON_VA_RESERVED_SIZE                        (8 << 20)
@@ -313,6 +318,7 @@ struct radeon_bo {
        struct list_head                list;
        /* Protected by tbo.reserved */
        u32                             placements[3];
+       u32                             busy_placements[3];
        struct ttm_placement            placement;
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
@@ -787,6 +793,15 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigne
 void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
 
 
+/* r600 async dma */
+void r600_dma_stop(struct radeon_device *rdev);
+int r600_dma_resume(struct radeon_device *rdev);
+void r600_dma_fini(struct radeon_device *rdev);
+
+void cayman_dma_stop(struct radeon_device *rdev);
+int cayman_dma_resume(struct radeon_device *rdev);
+void cayman_dma_fini(struct radeon_device *rdev);
+
 /*
  * CS.
  */
@@ -824,6 +839,7 @@ struct radeon_cs_parser {
        struct radeon_cs_reloc  *relocs;
        struct radeon_cs_reloc  **relocs_ptr;
        struct list_head        validated;
+       unsigned                dma_reloc_idx;
        /* indices of various chunks */
        int                     chunk_ib_idx;
        int                     chunk_relocs_idx;
@@ -883,7 +899,9 @@ struct radeon_wb {
 #define RADEON_WB_CP_RPTR_OFFSET 1024
 #define RADEON_WB_CP1_RPTR_OFFSET 1280
 #define RADEON_WB_CP2_RPTR_OFFSET 1536
+#define R600_WB_DMA_RPTR_OFFSET   1792
 #define R600_WB_IH_WPTR_OFFSET   2048
+#define CAYMAN_WB_DMA1_RPTR_OFFSET   2304
 #define R600_WB_EVENT_OFFSET     3072
 
 /**
@@ -1539,6 +1557,8 @@ struct radeon_device {
        /* Register mmio */
        resource_size_t                 rmmio_base;
        resource_size_t                 rmmio_size;
+       /* protects concurrent MM_INDEX/DATA based register access */
+       spinlock_t mmio_idx_lock;
        void __iomem                    *rmmio;
        radeon_rreg_t                   mc_rreg;
        radeon_wreg_t                   mc_wreg;
@@ -1614,8 +1634,10 @@ int radeon_device_init(struct radeon_device *rdev,
 void radeon_device_fini(struct radeon_device *rdev);
 int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
 
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+                     bool always_indirect);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+                 bool always_indirect);
 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 
@@ -1631,9 +1653,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 #define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
 #define RREG16(reg) readw((rdev->rmmio) + (reg))
 #define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
-#define RREG32(reg) r100_mm_rreg(rdev, (reg))
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
-#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
+#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
+#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
+#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
@@ -1658,7 +1682,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
                tmp_ |= ((val) & ~(mask));                      \
                WREG32_PLL(reg, tmp_);                          \
        } while (0)
-#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
 #define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
 #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
 
index 10ea17a..4243334 100644 (file)
@@ -69,9 +69,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
        /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
        { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
                PCI_VENDOR_ID_DELL, 0x00e3, 2},
-       /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
+       /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
        { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
                PCI_VENDOR_ID_DELL, 0x0149, 1},
+       /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
+       { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+               PCI_VENDOR_ID_IBM, 0x0531, 1},
        /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
        { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
                0x1025, 0x0061, 1},
index 654520b..596bcbe 100644 (file)
@@ -947,6 +947,15 @@ static struct radeon_asic r600_asic = {
                        .ring_test = &r600_ring_test,
                        .ib_test = &r600_ib_test,
                        .is_lockup = &r600_gpu_is_lockup,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &r600_dma_ring_ib_execute,
+                       .emit_fence = &r600_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &r600_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &r600_dma_is_lockup,
                }
        },
        .irq = {
@@ -963,10 +972,10 @@ static struct radeon_asic r600_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &r600_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &r600_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1022,6 +1031,15 @@ static struct radeon_asic rs780_asic = {
                        .ring_test = &r600_ring_test,
                        .ib_test = &r600_ib_test,
                        .is_lockup = &r600_gpu_is_lockup,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &r600_dma_ring_ib_execute,
+                       .emit_fence = &r600_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &r600_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &r600_dma_is_lockup,
                }
        },
        .irq = {
@@ -1038,10 +1056,10 @@ static struct radeon_asic rs780_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &r600_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &r600_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1097,6 +1115,15 @@ static struct radeon_asic rv770_asic = {
                        .ring_test = &r600_ring_test,
                        .ib_test = &r600_ib_test,
                        .is_lockup = &r600_gpu_is_lockup,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &r600_dma_ring_ib_execute,
+                       .emit_fence = &r600_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &r600_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &r600_dma_is_lockup,
                }
        },
        .irq = {
@@ -1113,10 +1140,10 @@ static struct radeon_asic rv770_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &r600_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &r600_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1172,6 +1199,15 @@ static struct radeon_asic evergreen_asic = {
                        .ring_test = &r600_ring_test,
                        .ib_test = &r600_ib_test,
                        .is_lockup = &evergreen_gpu_is_lockup,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &evergreen_dma_ring_ib_execute,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &r600_dma_is_lockup,
                }
        },
        .irq = {
@@ -1188,10 +1224,10 @@ static struct radeon_asic evergreen_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &evergreen_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &evergreen_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1248,6 +1284,15 @@ static struct radeon_asic sumo_asic = {
                        .ib_test = &r600_ib_test,
                        .is_lockup = &evergreen_gpu_is_lockup,
                },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &evergreen_dma_ring_ib_execute,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &r600_dma_is_lockup,
+               }
        },
        .irq = {
                .set = &evergreen_irq_set,
@@ -1263,10 +1308,10 @@ static struct radeon_asic sumo_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &evergreen_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &evergreen_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1322,6 +1367,15 @@ static struct radeon_asic btc_asic = {
                        .ring_test = &r600_ring_test,
                        .ib_test = &r600_ib_test,
                        .is_lockup = &evergreen_gpu_is_lockup,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &evergreen_dma_ring_ib_execute,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &r600_dma_is_lockup,
                }
        },
        .irq = {
@@ -1338,10 +1392,10 @@ static struct radeon_asic btc_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &evergreen_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &evergreen_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1391,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
        .vm = {
                .init = &cayman_vm_init,
                .fini = &cayman_vm_fini,
-               .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
                .set_page = &cayman_vm_set_page,
        },
        .ring = {
@@ -1427,6 +1481,28 @@ static struct radeon_asic cayman_asic = {
                        .ib_test = &r600_ib_test,
                        .is_lockup = &evergreen_gpu_is_lockup,
                        .vm_flush = &cayman_vm_flush,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &cayman_dma_ring_ib_execute,
+                       .ib_parse = &evergreen_dma_ib_parse,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &cayman_dma_is_lockup,
+                       .vm_flush = &cayman_dma_vm_flush,
+               },
+               [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+                       .ib_execute = &cayman_dma_ring_ib_execute,
+                       .ib_parse = &evergreen_dma_ib_parse,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &cayman_dma_is_lockup,
+                       .vm_flush = &cayman_dma_vm_flush,
                }
        },
        .irq = {
@@ -1443,10 +1519,10 @@ static struct radeon_asic cayman_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &evergreen_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &evergreen_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1496,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
        .vm = {
                .init = &cayman_vm_init,
                .fini = &cayman_vm_fini,
-               .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
                .set_page = &cayman_vm_set_page,
        },
        .ring = {
@@ -1532,6 +1608,28 @@ static struct radeon_asic trinity_asic = {
                        .ib_test = &r600_ib_test,
                        .is_lockup = &evergreen_gpu_is_lockup,
                        .vm_flush = &cayman_vm_flush,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &cayman_dma_ring_ib_execute,
+                       .ib_parse = &evergreen_dma_ib_parse,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &cayman_dma_is_lockup,
+                       .vm_flush = &cayman_dma_vm_flush,
+               },
+               [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+                       .ib_execute = &cayman_dma_ring_ib_execute,
+                       .ib_parse = &evergreen_dma_ib_parse,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &cayman_dma_is_lockup,
+                       .vm_flush = &cayman_dma_vm_flush,
                }
        },
        .irq = {
@@ -1548,10 +1646,10 @@ static struct radeon_asic trinity_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &evergreen_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &evergreen_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1601,7 +1699,7 @@ static struct radeon_asic si_asic = {
        .vm = {
                .init = &si_vm_init,
                .fini = &si_vm_fini,
-               .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
                .set_page = &si_vm_set_page,
        },
        .ring = {
@@ -1637,6 +1735,28 @@ static struct radeon_asic si_asic = {
                        .ib_test = &r600_ib_test,
                        .is_lockup = &si_gpu_is_lockup,
                        .vm_flush = &si_vm_flush,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &cayman_dma_ring_ib_execute,
+                       .ib_parse = &evergreen_dma_ib_parse,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = NULL,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &cayman_dma_is_lockup,
+                       .vm_flush = &si_dma_vm_flush,
+               },
+               [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+                       .ib_execute = &cayman_dma_ring_ib_execute,
+                       .ib_parse = &evergreen_dma_ib_parse,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = NULL,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &cayman_dma_is_lockup,
+                       .vm_flush = &si_dma_vm_flush,
                }
        },
        .irq = {
@@ -1653,10 +1773,10 @@ static struct radeon_asic si_asic = {
        .copy = {
                .blit = NULL,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = NULL,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &si_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &si_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
index 5e3a0e5..5f4882c 100644 (file)
@@ -263,6 +263,7 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
 struct rv515_mc_save {
        u32 vga_render_control;
        u32 vga_hdp_control;
+       bool crtc_enabled[2];
 };
 
 int rv515_init(struct radeon_device *rdev);
@@ -303,12 +304,21 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
 uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
 void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int r600_cs_parse(struct radeon_cs_parser *p);
+int r600_dma_cs_parse(struct radeon_cs_parser *p);
 void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence);
 void r600_semaphore_ring_emit(struct radeon_device *rdev,
                              struct radeon_ring *cp,
                              struct radeon_semaphore *semaphore,
                              bool emit_wait);
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+                             struct radeon_fence *fence);
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+                                 struct radeon_ring *ring,
+                                 struct radeon_semaphore *semaphore,
+                                 bool emit_wait);
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_asic_reset(struct radeon_device *rdev);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@ -316,11 +326,16 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t offset, uint32_t obj_size);
 void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
                   unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_dma(struct radeon_device *rdev,
+                 uint64_t src_offset, uint64_t dst_offset,
+                 unsigned num_gpu_pages, struct radeon_fence **fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -416,6 +431,7 @@ u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
 int evergreen_irq_set(struct radeon_device *rdev);
 int evergreen_irq_process(struct radeon_device *rdev);
 extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
 extern void evergreen_pm_misc(struct radeon_device *rdev);
 extern void evergreen_pm_prepare(struct radeon_device *rdev);
 extern void evergreen_pm_finish(struct radeon_device *rdev);
@@ -428,6 +444,14 @@ extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
 void evergreen_disable_interrupt_state(struct radeon_device *rdev);
 int evergreen_blit_init(struct radeon_device *rdev);
 int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+                                  struct radeon_fence *fence);
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+                                  struct radeon_ib *ib);
+int evergreen_copy_dma(struct radeon_device *rdev,
+                      uint64_t src_offset, uint64_t dst_offset,
+                      unsigned num_gpu_pages,
+                      struct radeon_fence **fence);
 
 /*
  * cayman
@@ -449,6 +473,11 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
                        uint64_t addr, unsigned count,
                        uint32_t incr, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+                               struct radeon_ib *ib);
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
 /* DCE6 - SI */
 void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -476,5 +505,10 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 uint64_t si_get_gpu_clock(struct radeon_device *rdev);
+int si_copy_dma(struct radeon_device *rdev,
+               uint64_t src_offset, uint64_t dst_offset,
+               unsigned num_gpu_pages,
+               struct radeon_fence **fence);
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
 #endif
index 45b660b..4af8912 100644 (file)
@@ -3246,11 +3246,9 @@ static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
        while (ram--) {
                addr = ram * 1024 * 1024;
                /* write to each page */
-               WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
-               WREG32(RADEON_MM_DATA, 0xdeadbeef);
+               WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
                /* read back and verify */
-               WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
-               if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
+               if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
                        return 0;
        }
 
index b884c36..47bf162 100644 (file)
@@ -1599,7 +1599,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                        connector->interlace_allowed = true;
                        connector->doublescan_allowed = true;
                        radeon_connector->dac_load_detect = true;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
                        break;
@@ -1608,13 +1608,13 @@ radeon_add_atom_connector(struct drm_device *dev,
                case DRM_MODE_CONNECTOR_HDMIA:
                case DRM_MODE_CONNECTOR_HDMIB:
                case DRM_MODE_CONNECTOR_DisplayPort:
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.underscan_property,
                                                      UNDERSCAN_OFF);
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.underscan_hborder_property,
                                                      0);
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.underscan_vborder_property,
                                                      0);
                        subpixel_order = SubPixelHorizontalRGB;
@@ -1625,14 +1625,14 @@ radeon_add_atom_connector(struct drm_device *dev,
                                connector->doublescan_allowed = false;
                        if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                                radeon_connector->dac_load_detect = true;
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.load_detect_property,
                                                              1);
                        }
                        break;
                case DRM_MODE_CONNECTOR_LVDS:
                case DRM_MODE_CONNECTOR_eDP:
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      dev->mode_config.scaling_mode_property,
                                                      DRM_MODE_SCALE_FULLSCREEN);
                        subpixel_order = SubPixelHorizontalRGB;
@@ -1651,7 +1651,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                        DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
                        radeon_connector->dac_load_detect = true;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
                        /* no HPD on analog connectors */
@@ -1669,7 +1669,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                        DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
                        radeon_connector->dac_load_detect = true;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
                        /* no HPD on analog connectors */
@@ -1692,23 +1692,23 @@ radeon_add_atom_connector(struct drm_device *dev,
                                        DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
                        subpixel_order = SubPixelHorizontalRGB;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.coherent_mode_property,
                                                      1);
                        if (ASIC_IS_AVIVO(rdev)) {
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_property,
                                                              UNDERSCAN_OFF);
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_hborder_property,
                                                              0);
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
                        if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                                radeon_connector->dac_load_detect = true;
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.load_detect_property,
                                                              1);
                        }
@@ -1732,17 +1732,17 @@ radeon_add_atom_connector(struct drm_device *dev,
                                if (!radeon_connector->ddc_bus)
                                        DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.coherent_mode_property,
                                                      1);
                        if (ASIC_IS_AVIVO(rdev)) {
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_property,
                                                              UNDERSCAN_OFF);
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_hborder_property,
                                                              0);
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
@@ -1771,17 +1771,17 @@ radeon_add_atom_connector(struct drm_device *dev,
                                        DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
                        subpixel_order = SubPixelHorizontalRGB;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.coherent_mode_property,
                                                      1);
                        if (ASIC_IS_AVIVO(rdev)) {
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_property,
                                                              UNDERSCAN_OFF);
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_hborder_property,
                                                              0);
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
@@ -1806,7 +1806,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                if (!radeon_connector->ddc_bus)
                                        DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      dev->mode_config.scaling_mode_property,
                                                      DRM_MODE_SCALE_FULLSCREEN);
                        subpixel_order = SubPixelHorizontalRGB;
@@ -1819,10 +1819,10 @@ radeon_add_atom_connector(struct drm_device *dev,
                        drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
                        drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
                        radeon_connector->dac_load_detect = true;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.tv_std_property,
                                                      radeon_atombios_get_tv_info(rdev));
                        /* no HPD on analog connectors */
@@ -1843,7 +1843,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                if (!radeon_connector->ddc_bus)
                                        DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      dev->mode_config.scaling_mode_property,
                                                      DRM_MODE_SCALE_FULLSCREEN);
                        subpixel_order = SubPixelHorizontalRGB;
@@ -1922,7 +1922,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                                DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                radeon_connector->dac_load_detect = true;
-               drm_connector_attach_property(&radeon_connector->base,
+               drm_object_attach_property(&radeon_connector->base.base,
                                              rdev->mode_info.load_detect_property,
                                              1);
                /* no HPD on analog connectors */
@@ -1940,7 +1940,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                                DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                radeon_connector->dac_load_detect = true;
-               drm_connector_attach_property(&radeon_connector->base,
+               drm_object_attach_property(&radeon_connector->base.base,
                                              rdev->mode_info.load_detect_property,
                                              1);
                /* no HPD on analog connectors */
@@ -1959,7 +1959,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                }
                if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                        radeon_connector->dac_load_detect = true;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
                }
@@ -1983,10 +1983,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
                 */
                if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
                        radeon_connector->dac_load_detect = false;
-               drm_connector_attach_property(&radeon_connector->base,
+               drm_object_attach_property(&radeon_connector->base.base,
                                              rdev->mode_info.load_detect_property,
                                              radeon_connector->dac_load_detect);
-               drm_connector_attach_property(&radeon_connector->base,
+               drm_object_attach_property(&radeon_connector->base.base,
                                              rdev->mode_info.tv_std_property,
                                              radeon_combios_get_tv_info(rdev));
                /* no HPD on analog connectors */
@@ -2002,7 +2002,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                        if (!radeon_connector->ddc_bus)
                                DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
-               drm_connector_attach_property(&radeon_connector->base,
+               drm_object_attach_property(&radeon_connector->base.base,
                                              dev->mode_config.scaling_mode_property,
                                              DRM_MODE_SCALE_FULLSCREEN);
                subpixel_order = SubPixelHorizontalRGB;
index 8b2797d..9143fc4 100644 (file)
@@ -116,20 +116,6 @@ u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
        }
 }
 
-u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr)
-{
-       u32 ret;
-
-       if (addr < 0x10000)
-               ret = DRM_READ32(dev_priv->mmio, addr);
-       else {
-               DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr);
-               ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA);
-       }
-
-       return ret;
-}
-
 static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 {
        u32 ret;
index 41672cc..396baba 100644 (file)
@@ -43,6 +43,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                return 0;
        }
        chunk = &p->chunks[p->chunk_relocs_idx];
+       p->dma_reloc_idx = 0;
        /* FIXME: we assume that each relocs use 4 dwords */
        p->nrelocs = chunk->length_dw / 4;
        p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
@@ -111,6 +112,18 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
                } else
                        p->ring = RADEON_RING_TYPE_GFX_INDEX;
                break;
+       case RADEON_CS_RING_DMA:
+               if (p->rdev->family >= CHIP_CAYMAN) {
+                       if (p->priority > 0)
+                               p->ring = R600_RING_TYPE_DMA_INDEX;
+                       else
+                               p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
+               } else if (p->rdev->family >= CHIP_R600) {
+                       p->ring = R600_RING_TYPE_DMA_INDEX;
+               } else {
+                       return -EINVAL;
+               }
+               break;
        }
        return 0;
 }
index 0fe56c9..ad6df62 100644 (file)
@@ -66,24 +66,25 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
        struct radeon_device *rdev = crtc->dev->dev_private;
 
        if (ASIC_IS_DCE4(rdev)) {
-               WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
-               WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
-                      EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+               WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
+                          EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+                          EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
        } else if (ASIC_IS_AVIVO(rdev)) {
-               WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
-               WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+               WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
+                          (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
        } else {
+               u32 reg;
                switch (radeon_crtc->crtc_id) {
                case 0:
-                       WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+                       reg = RADEON_CRTC_GEN_CNTL;
                        break;
                case 1:
-                       WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+                       reg = RADEON_CRTC2_GEN_CNTL;
                        break;
                default:
                        return;
                }
-               WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
+               WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
        }
 }
 
index e2f5f88..49b0659 100644 (file)
@@ -1059,6 +1059,7 @@ int radeon_device_init(struct radeon_device *rdev,
 
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
+       spin_lock_init(&rdev->mmio_idx_lock);
        rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
        rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
        rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
index bfa2a60..310c0e5 100644 (file)
@@ -378,8 +378,12 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
        work->old_rbo = rbo;
        obj = new_radeon_fb->obj;
        rbo = gem_to_radeon_bo(obj);
+
+       spin_lock(&rbo->tbo.bdev->fence_lock);
        if (rbo->tbo.sync_obj)
                work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
+       spin_unlock(&rbo->tbo.bdev->fence_lock);
+
        INIT_WORK(&work->work, radeon_unpin_work_func);
 
        /* We borrow the event spin lock for protecting unpin_work */
index 8c1a83c..9b1a727 100644 (file)
  *   2.22.0 - r600 only: RESOLVE_BOX allowed
  *   2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
  *   2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
+ *   2.25.0 - eg+: new info request for num SE and num SH
+ *   2.26.0 - r600-eg: fix htile size computation
+ *   2.27.0 - r600-SI: Add CS ioctl support for async DMA
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       24
+#define KMS_DRIVER_MINOR       27
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index a1b59ca..e7fdf16 100644 (file)
@@ -366,7 +366,6 @@ extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file
 extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
 extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
 extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
-extern u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr);
 
 extern void radeon_freelist_reset(struct drm_device * dev);
 extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
index 22bd6c2..410a975 100644 (file)
@@ -772,7 +772,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
        int r;
 
        radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
-       if (rdev->wb.use_event) {
+       if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
                rdev->fence_drv[ring].scratch_reg = 0;
                index = R600_WB_EVENT_OFFSET + ring * 4;
        } else {
index 8690be7..6e24f84 100644 (file)
@@ -1237,7 +1237,6 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 {
        struct radeon_bo_va *bo_va;
 
-       BUG_ON(!radeon_bo_is_reserved(bo));
        list_for_each_entry(bo_va, &bo->va, bo_list) {
                bo_va->valid = false;
        }
index dc781c4..9c312f9 100644 (file)
@@ -361,6 +361,22 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        return -EINVAL;
                }
                break;
+       case RADEON_INFO_MAX_SE:
+               if (rdev->family >= CHIP_TAHITI)
+                       value = rdev->config.si.max_shader_engines;
+               else if (rdev->family >= CHIP_CAYMAN)
+                       value = rdev->config.cayman.max_shader_engines;
+               else if (rdev->family >= CHIP_CEDAR)
+                       value = rdev->config.evergreen.num_ses;
+               else
+                       value = 1;
+               break;
+       case RADEON_INFO_MAX_SH_PER_SE:
+               if (rdev->family >= CHIP_TAHITI)
+                       value = rdev->config.si.max_sh_per_se;
+               else
+                       return -EINVAL;
+               break;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
index 7c4b4bb..883c95d 100644 (file)
@@ -84,17 +84,34 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
        rbo->placement.fpfn = 0;
        rbo->placement.lpfn = 0;
        rbo->placement.placement = rbo->placements;
-       rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM)
                rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                        TTM_PL_FLAG_VRAM;
-       if (domain & RADEON_GEM_DOMAIN_GTT)
-               rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-       if (domain & RADEON_GEM_DOMAIN_CPU)
-               rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+       if (domain & RADEON_GEM_DOMAIN_GTT) {
+               if (rbo->rdev->flags & RADEON_IS_AGP) {
+                       rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+               } else {
+                       rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+               }
+       }
+       if (domain & RADEON_GEM_DOMAIN_CPU) {
+               if (rbo->rdev->flags & RADEON_IS_AGP) {
+                       rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+               } else {
+                       rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+               }
+       }
        if (!c)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        rbo->placement.num_placement = c;
+
+       c = 0;
+       rbo->placement.busy_placement = rbo->busy_placements;
+       if (rbo->rdev->flags & RADEON_IS_AGP) {
+               rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+       } else {
+               rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+       }
        rbo->placement.num_busy_placement = c;
 }
 
@@ -240,7 +257,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
        }
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
@@ -269,7 +286,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        return r;
@@ -340,7 +357,6 @@ int radeon_bo_list_validate(struct list_head *head)
 {
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
-       u32 domain;
        int r;
 
        r = ttm_eu_reserve_buffers(head);
@@ -350,17 +366,9 @@ int radeon_bo_list_validate(struct list_head *head)
        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->bo;
                if (!bo->pin_count) {
-                       domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
-                       
-               retry:
-                       radeon_ttm_placement_from_domain(bo, domain);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
-                                               true, false, false);
+                                               true, false);
                        if (unlikely(r)) {
-                               if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
-                                       domain |= RADEON_GEM_DOMAIN_GTT;
-                                       goto retry;
-                               }
                                return r;
                        }
                }
@@ -520,7 +528,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop)
 {
-       BUG_ON(!radeon_bo_is_reserved(bo));
+       BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
 
        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;
@@ -575,7 +583,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
                        /* hurrah the memory is not visible ! */
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
                        rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-                       r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+                       r = ttm_bo_validate(bo, &rbo->placement, false, false);
                        if (unlikely(r != 0))
                                return r;
                        offset = bo->mem.start << PAGE_SHIFT;
index 587c09a..fda09c9 100644 (file)
 #include "radeon_reg.h"
 #include "radeon.h"
 
+#define RADEON_TEST_COPY_BLIT 1
+#define RADEON_TEST_COPY_DMA  0
+
 
 /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
-void radeon_test_moves(struct radeon_device *rdev)
+static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
 {
        struct radeon_bo *vram_obj = NULL;
        struct radeon_bo **gtt_obj = NULL;
        struct radeon_fence *fence = NULL;
        uint64_t gtt_addr, vram_addr;
        unsigned i, n, size;
-       int r;
+       int r, ring;
+
+       switch (flag) {
+       case RADEON_TEST_COPY_DMA:
+               ring = radeon_copy_dma_ring_index(rdev);
+               break;
+       case RADEON_TEST_COPY_BLIT:
+               ring = radeon_copy_blit_ring_index(rdev);
+               break;
+       default:
+               DRM_ERROR("Unknown copy method\n");
+               return;
+       }
 
        size = 1024 * 1024;
 
@@ -106,7 +121,10 @@ void radeon_test_moves(struct radeon_device *rdev)
 
                radeon_bo_kunmap(gtt_obj[i]);
 
-               r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+               if (ring == R600_RING_TYPE_DMA_INDEX)
+                       r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+               else
+                       r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
                if (r) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                        goto out_cleanup;
@@ -149,7 +167,10 @@ void radeon_test_moves(struct radeon_device *rdev)
 
                radeon_bo_kunmap(vram_obj);
 
-               r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+               if (ring == R600_RING_TYPE_DMA_INDEX)
+                       r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+               else
+                       r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
                if (r) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                        goto out_cleanup;
@@ -223,6 +244,14 @@ out_cleanup:
        }
 }
 
+void radeon_test_moves(struct radeon_device *rdev)
+{
+       if (rdev->asic->copy.dma)
+               radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
+       if (rdev->asic->copy.blit)
+               radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
+}
+
 void radeon_test_ring_sync(struct radeon_device *rdev,
                           struct radeon_ring *ringA,
                           struct radeon_ring *ringB)
index 563c8ed..1d8ff2f 100644 (file)
@@ -216,7 +216,7 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-                       bool evict, int no_wait_reserve, bool no_wait_gpu,
+                       bool evict, bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem,
                        struct ttm_mem_reg *old_mem)
 {
@@ -266,14 +266,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
                        &fence);
        /* FIXME: handle copy error */
        r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
-                                     evict, no_wait_reserve, no_wait_gpu, new_mem);
+                                     evict, no_wait_gpu, new_mem);
        radeon_fence_unref(&fence);
        return r;
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
-                               bool no_wait_reserve, bool no_wait_gpu,
+                               bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
@@ -294,7 +294,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-                            interruptible, no_wait_reserve, no_wait_gpu);
+                            interruptible, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
@@ -308,11 +308,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
+       r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
 out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
@@ -320,7 +320,7 @@ out_cleanup:
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
-                               bool no_wait_reserve, bool no_wait_gpu,
+                               bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
@@ -340,15 +340,16 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
+       r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+                            interruptible, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+       r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@ -359,7 +360,7 @@ out_cleanup:
 
 static int radeon_bo_move(struct ttm_buffer_object *bo,
                        bool evict, bool interruptible,
-                       bool no_wait_reserve, bool no_wait_gpu,
+                       bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
@@ -388,18 +389,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                r = radeon_move_vram_ram(bo, evict, interruptible,
-                                       no_wait_reserve, no_wait_gpu, new_mem);
+                                       no_wait_gpu, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
                r = radeon_move_ram_vram(bo, evict, interruptible,
-                                           no_wait_reserve, no_wait_gpu, new_mem);
+                                           no_wait_gpu, new_mem);
        } else {
-               r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+               r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
        }
 
        if (r) {
 memcpy:
-               r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+               r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
        }
        return r;
 }
index 785d095..2bb6d0e 100644 (file)
@@ -40,6 +40,12 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
 static void rv515_gpu_init(struct radeon_device *rdev);
 int rv515_mc_wait_for_idle(struct radeon_device *rdev);
 
+static const u32 crtc_offsets[2] =
+{
+       0,
+       AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
 void rv515_debugfs(struct radeon_device *rdev)
 {
        if (r100_debugfs_rbbm_init(rdev)) {
@@ -281,30 +287,114 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
 
 void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
 {
+       u32 crtc_enabled, tmp, frame_count, blackout;
+       int i, j;
+
        save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
        save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
 
-       /* Stop all video */
-       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+       /* disable VGA render */
        WREG32(R_000300_VGA_RENDER_CONTROL, 0);
-       WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
-       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
-       WREG32(R_006080_D1CRTC_CONTROL, 0);
-       WREG32(R_006880_D2CRTC_CONTROL, 0);
-       WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
-       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
-       WREG32(R_000330_D1VGA_CONTROL, 0);
-       WREG32(R_000338_D2VGA_CONTROL, 0);
+       /* blank the display controllers */
+       for (i = 0; i < rdev->num_crtc; i++) {
+               crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
+               if (crtc_enabled) {
+                       save->crtc_enabled[i] = true;
+                       tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+                       if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
+                               radeon_wait_for_vblank(rdev, i);
+                               tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+                               WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+                       }
+                       /* wait for the next frame */
+                       frame_count = radeon_get_vblank_counter(rdev, i);
+                       for (j = 0; j < rdev->usec_timeout; j++) {
+                               if (radeon_get_vblank_counter(rdev, i) != frame_count)
+                                       break;
+                               udelay(1);
+                       }
+               } else {
+                       save->crtc_enabled[i] = false;
+               }
+       }
+
+       radeon_mc_wait_for_idle(rdev);
+
+       if (rdev->family >= CHIP_R600) {
+               if (rdev->family >= CHIP_RV770)
+                       blackout = RREG32(R700_MC_CITF_CNTL);
+               else
+                       blackout = RREG32(R600_CITF_CNTL);
+               if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
+                       /* Block CPU access */
+                       WREG32(R600_BIF_FB_EN, 0);
+                       /* blackout the MC */
+                       blackout |= R600_BLACKOUT_MASK;
+                       if (rdev->family >= CHIP_RV770)
+                               WREG32(R700_MC_CITF_CNTL, blackout);
+                       else
+                               WREG32(R600_CITF_CNTL, blackout);
+               }
+       }
 }
 
 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
 {
-       WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
-       WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
-       WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
-       WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
-       WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
-       /* Unlock host access */
+       u32 tmp, frame_count;
+       int i, j;
+
+       /* update crtc base addresses */
+       for (i = 0; i < rdev->num_crtc; i++) {
+               if (rdev->family >= CHIP_RV770) {
+                       if (i == 1) {
+                               WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+                                      upper_32_bits(rdev->mc.vram_start));
+                               WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+                                      upper_32_bits(rdev->mc.vram_start));
+                       } else {
+                               WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+                                      upper_32_bits(rdev->mc.vram_start));
+                               WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+                                      upper_32_bits(rdev->mc.vram_start));
+                       }
+               }
+               WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+                      (u32)rdev->mc.vram_start);
+               WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+                      (u32)rdev->mc.vram_start);
+       }
+       WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+       if (rdev->family >= CHIP_R600) {
+               /* unblackout the MC */
+               if (rdev->family >= CHIP_RV770)
+                       tmp = RREG32(R700_MC_CITF_CNTL);
+               else
+                       tmp = RREG32(R600_CITF_CNTL);
+               tmp &= ~R600_BLACKOUT_MASK;
+               if (rdev->family >= CHIP_RV770)
+                       WREG32(R700_MC_CITF_CNTL, tmp);
+               else
+                       WREG32(R600_CITF_CNTL, tmp);
+               /* allow CPU access */
+               WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
+       }
+
+       for (i = 0; i < rdev->num_crtc; i++) {
+               if (save->crtc_enabled[i]) {
+                       tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+                       tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+                       WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+                       /* wait for the next frame */
+                       frame_count = radeon_get_vblank_counter(rdev, i);
+                       for (j = 0; j < rdev->usec_timeout; j++) {
+                               if (radeon_get_vblank_counter(rdev, i) != frame_count)
+                                       break;
+                               udelay(1);
+                       }
+               }
+       }
+       /* Unlock vga access */
        WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
        mdelay(1);
        WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
index 79814a0..87c979c 100644 (file)
@@ -316,6 +316,7 @@ void r700_cp_stop(struct radeon_device *rdev)
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
        WREG32(SCRATCH_UMSK, 0);
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 }
 
 static int rv770_cp_load_microcode(struct radeon_device *rdev)
@@ -583,6 +584,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        WREG32(GB_TILING_CONFIG, gb_tiling_config);
        WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
        WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+       WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
+       WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
 
        WREG32(CGTS_SYS_TCC_DISABLE, 0);
        WREG32(CGTS_TCC_DISABLE, 0);
@@ -886,7 +889,7 @@ static int rv770_mc_init(struct radeon_device *rdev)
 
 static int rv770_startup(struct radeon_device *rdev)
 {
-       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       struct radeon_ring *ring;
        int r;
 
        /* enable pcie gen2 link */
@@ -932,6 +935,12 @@ static int rv770_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -941,11 +950,20 @@ static int rv770_startup(struct radeon_device *rdev)
        }
        r600_irq_set(rdev);
 
+       ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
                             R600_CP_RB_RPTR, R600_CP_RB_WPTR,
                             0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
+
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+                            DMA_RB_RPTR, DMA_RB_WPTR,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       if (r)
+               return r;
+
        r = rv770_cp_load_microcode(rdev);
        if (r)
                return r;
@@ -953,6 +971,10 @@ static int rv770_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_dma_resume(rdev);
+       if (r)
+               return r;
+
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -995,7 +1017,7 @@ int rv770_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
        r700_cp_stop(rdev);
-       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+       r600_dma_stop(rdev);
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        rv770_pcie_gart_disable(rdev);
@@ -1066,6 +1088,9 @@ int rv770_init(struct radeon_device *rdev)
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+       rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -1078,6 +1103,7 @@ int rv770_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
+               r600_dma_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_ib_pool_fini(rdev);
@@ -1093,6 +1119,7 @@ void rv770_fini(struct radeon_device *rdev)
 {
        r600_blit_fini(rdev);
        r700_cp_fini(rdev);
+       r600_dma_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
index b0adfc5..20e29d2 100644 (file)
 #define     PIPE_TILING__SHIFT              1
 #define     PIPE_TILING__MASK               0x0000000e
 
+#define DMA_TILING_CONFIG                               0x3ec8
+#define DMA_TILING_CONFIG2                              0xd0b8
+
 #define        GC_USER_SHADER_PIPE_CONFIG                      0x8954
 #define                INACTIVE_QD_PIPES(x)                            ((x) << 8)
 #define                INACTIVE_QD_PIPES_MASK                          0x0000FF00
 
 #define        WAIT_UNTIL                                      0x8040
 
+/* async DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)       ((((cmd) & 0xF) << 28) |        \
+                                        (((t) & 0x1) << 23) |          \
+                                        (((s) & 0x1) << 22) |          \
+                                        (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define        DMA_PACKET_WRITE                                  0x2
+#define        DMA_PACKET_COPY                                   0x3
+#define        DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define        DMA_PACKET_SEMAPHORE                              0x5
+#define        DMA_PACKET_FENCE                                  0x6
+#define        DMA_PACKET_TRAP                                   0x7
+#define        DMA_PACKET_CONSTANT_FILL                          0xd
+#define        DMA_PACKET_NOP                                    0xf
+
+
 #define        SRBM_STATUS                                     0x0E50
 
 /* DCE 3.2 HDMI */
 #define HDMI_OFFSET0                      (0x7400 - 0x7400)
 #define HDMI_OFFSET1                      (0x7800 - 0x7400)
 
+/* DCE3.2 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x71fc /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL                               0x7300
+#       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
+#       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE                 (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE                 (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE                 (1 << 7)
+#       define PIN0_UNSOLICITED_RESPONSE_ENABLE           (1 << 8)
+#       define PIN1_UNSOLICITED_RESPONSE_ENABLE           (1 << 9)
+#       define PIN2_UNSOLICITED_RESPONSE_ENABLE           (1 << 10)
+#       define PIN3_UNSOLICITED_RESPONSE_ENABLE           (1 << 11)
+#       define CODEC_HOT_PLUG_ENABLE                      (1 << 12)
+#       define PIN0_AUDIO_ENABLED                         (1 << 24)
+#       define PIN1_AUDIO_ENABLED                         (1 << 25)
+#       define PIN2_AUDIO_ENABLED                         (1 << 26)
+#       define PIN3_AUDIO_ENABLED                         (1 << 27)
+#       define AUDIO_ENABLED                              (1 << 31)
+
+
 #define D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
 #define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6914
 #define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6114
index b0db712..7e835d9 100644 (file)
@@ -1660,6 +1660,8 @@ static void si_gpu_init(struct radeon_device *rdev)
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
 
        si_tiling_mode_table_init(rdev);
 
@@ -1836,6 +1838,9 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
                WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
                WREG32(SCRATCH_UMSK, 0);
+               rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+               rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+               rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
        }
        udelay(50);
 }
@@ -2426,9 +2431,20 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
        /* enable context1-15 */
        WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(rdev->dummy_page.addr >> 12));
-       WREG32(VM_CONTEXT1_CNTL2, 0);
+       WREG32(VM_CONTEXT1_CNTL2, 4);
        WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
-                               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+                               RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
        si_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -2474,6 +2490,7 @@ static bool si_vm_reg_valid(u32 reg)
        /* check config regs */
        switch (reg) {
        case GRBM_GFX_INDEX:
+       case CP_STRMOUT_CNTL:
        case VGT_VTX_VECT_EJECT_REG:
        case VGT_CACHE_INVALIDATION:
        case VGT_ESGS_RING_SIZE:
@@ -2533,6 +2550,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
        u32 idx = pkt->idx + 1;
        u32 idx_value = ib[idx];
        u32 start_reg, end_reg, reg, i;
+       u32 command, info;
 
        switch (pkt->opcode) {
        case PACKET3_NOP:
@@ -2632,6 +2650,52 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
                                return -EINVAL;
                }
                break;
+       case PACKET3_CP_DMA:
+               command = ib[idx + 4];
+               info = ib[idx + 1];
+               if (command & PACKET3_CP_DMA_CMD_SAS) {
+                       /* src address space is register */
+                       if (((info & 0x60000000) >> 29) == 0) {
+                               start_reg = idx_value << 2;
+                               if (command & PACKET3_CP_DMA_CMD_SAIC) {
+                                       reg = start_reg;
+                                       if (!si_vm_reg_valid(reg)) {
+                                               DRM_ERROR("CP DMA Bad SRC register\n");
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       for (i = 0; i < (command & 0x1fffff); i++) {
+                                               reg = start_reg + (4 * i);
+                                               if (!si_vm_reg_valid(reg)) {
+                                                       DRM_ERROR("CP DMA Bad SRC register\n");
+                                                       return -EINVAL;
+                                               }
+                                       }
+                               }
+                       }
+               }
+               if (command & PACKET3_CP_DMA_CMD_DAS) {
+                       /* dst address space is register */
+                       if (((info & 0x00300000) >> 20) == 0) {
+                               start_reg = ib[idx + 2];
+                               if (command & PACKET3_CP_DMA_CMD_DAIC) {
+                                       reg = start_reg;
+                                       if (!si_vm_reg_valid(reg)) {
+                                               DRM_ERROR("CP DMA Bad DST register\n");
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       for (i = 0; i < (command & 0x1fffff); i++) {
+                                               reg = start_reg + (4 * i);
+                                               if (!si_vm_reg_valid(reg)) {
+                                                       DRM_ERROR("CP DMA Bad DST register\n");
+                                                       return -EINVAL;
+                                               }
+                                       }
+                               }
+                       }
+               }
+               break;
        default:
                DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
                return -EINVAL;
@@ -2808,30 +2872,86 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
        struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
        uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
-       while (count) {
-               unsigned ndw = 2 + count * 2;
-               if (ndw > 0x3FFE)
-                       ndw = 0x3FFE;
-
-               radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
-               radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                        WRITE_DATA_DST_SEL(1)));
-               radeon_ring_write(ring, pe);
-               radeon_ring_write(ring, upper_32_bits(pe));
-               for (; ndw > 2; ndw -= 2, --count, pe += 8) {
-                       uint64_t value;
-                       if (flags & RADEON_VM_PAGE_SYSTEM) {
-                               value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
-                       } else if (flags & RADEON_VM_PAGE_VALID)
-                               value = addr;
-                       else
-                               value = 0;
-                       addr += incr;
-                       value |= r600_flags;
-                       radeon_ring_write(ring, value);
-                       radeon_ring_write(ring, upper_32_bits(value));
+       uint64_t value;
+       unsigned ndw;
+
+       if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+               while (count) {
+                       ndw = 2 + count * 2;
+                       if (ndw > 0x3FFE)
+                               ndw = 0x3FFE;
+
+                       radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+                       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                                                WRITE_DATA_DST_SEL(1)));
+                       radeon_ring_write(ring, pe);
+                       radeon_ring_write(ring, upper_32_bits(pe));
+                       for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+                               if (flags & RADEON_VM_PAGE_SYSTEM) {
+                                       value = radeon_vm_map_gart(rdev, addr);
+                                       value &= 0xFFFFFFFFFFFFF000ULL;
+                               } else if (flags & RADEON_VM_PAGE_VALID) {
+                                       value = addr;
+                               } else {
+                                       value = 0;
+                               }
+                               addr += incr;
+                               value |= r600_flags;
+                               radeon_ring_write(ring, value);
+                               radeon_ring_write(ring, upper_32_bits(value));
+                       }
+               }
+       } else {
+               /* DMA */
+               if (flags & RADEON_VM_PAGE_SYSTEM) {
+                       while (count) {
+                               ndw = count * 2;
+                               if (ndw > 0xFFFFE)
+                                       ndw = 0xFFFFE;
+
+                               /* for non-physically contiguous pages (system) */
+                               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
+                               radeon_ring_write(ring, pe);
+                               radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+                               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+                                       if (flags & RADEON_VM_PAGE_SYSTEM) {
+                                               value = radeon_vm_map_gart(rdev, addr);
+                                               value &= 0xFFFFFFFFFFFFF000ULL;
+                                       } else if (flags & RADEON_VM_PAGE_VALID) {
+                                               value = addr;
+                                       } else {
+                                               value = 0;
+                                       }
+                                       addr += incr;
+                                       value |= r600_flags;
+                                       radeon_ring_write(ring, value);
+                                       radeon_ring_write(ring, upper_32_bits(value));
+                               }
+                       }
+               } else {
+                       while (count) {
+                               ndw = count * 2;
+                               if (ndw > 0xFFFFE)
+                                       ndw = 0xFFFFE;
+
+                               if (flags & RADEON_VM_PAGE_VALID)
+                                       value = addr;
+                               else
+                                       value = 0;
+                               /* for physically contiguous pages (vram) */
+                               radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
+                               radeon_ring_write(ring, pe); /* dst addr */
+                               radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+                               radeon_ring_write(ring, r600_flags); /* mask */
+                               radeon_ring_write(ring, 0);
+                               radeon_ring_write(ring, value); /* value */
+                               radeon_ring_write(ring, upper_32_bits(value));
+                               radeon_ring_write(ring, incr); /* increment size */
+                               radeon_ring_write(ring, 0);
+                               pe += ndw * 4;
+                               addr += (ndw / 2) * incr;
+                               count -= ndw / 2;
+                       }
                }
        }
 }
@@ -2879,6 +2999,32 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        radeon_ring_write(ring, 0x0);
 }
 
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+       struct radeon_ring *ring = &rdev->ring[ridx];
+
+       if (vm == NULL)
+               return;
+
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       if (vm->id < 8) {
+               radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+       } else {
+               radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+       }
+       radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+       /* flush hdp cache */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+       radeon_ring_write(ring, 1);
+
+       /* bits 0-7 are the VM contexts0-7 */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+       radeon_ring_write(ring, 1 << vm->id);
+}
+
 /*
  * RLC
  */
@@ -3047,6 +3193,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
        WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
        WREG32(CP_INT_CNTL_RING1, 0);
        WREG32(CP_INT_CNTL_RING2, 0);
+       tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+       WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
+       tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+       WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -3166,6 +3316,7 @@ int si_irq_set(struct radeon_device *rdev)
        u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
        u32 grbm_int_cntl = 0;
        u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+       u32 dma_cntl, dma_cntl1;
 
        if (!rdev->irq.installed) {
                WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3186,6 +3337,9 @@ int si_irq_set(struct radeon_device *rdev)
        hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
        hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
+       dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+       dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
        /* enable CP interrupts on all rings */
        if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
                DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -3199,6 +3353,15 @@ int si_irq_set(struct radeon_device *rdev)
                DRM_DEBUG("si_irq_set: sw int cp2\n");
                cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
        }
+       if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+               DRM_DEBUG("si_irq_set: sw int dma\n");
+               dma_cntl |= TRAP_ENABLE;
+       }
+
+       if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+               DRM_DEBUG("si_irq_set: sw int dma1\n");
+               dma_cntl1 |= TRAP_ENABLE;
+       }
        if (rdev->irq.crtc_vblank_int[0] ||
            atomic_read(&rdev->irq.pflip[0])) {
                DRM_DEBUG("si_irq_set: vblank 0\n");
@@ -3258,6 +3421,9 @@ int si_irq_set(struct radeon_device *rdev)
        WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
        WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
 
+       WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
+       WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
+
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3683,6 +3849,16 @@ restart_ih:
                                break;
                        }
                        break;
+               case 146:
+               case 147:
+                       dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+                       dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+                               RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+                       dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+                               RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+                       /* reset addr and status */
+                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+                       break;
                case 176: /* RINGID0 CP_INT */
                        radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
@@ -3706,9 +3882,17 @@ restart_ih:
                                break;
                        }
                        break;
+               case 224: /* DMA trap event */
+                       DRM_DEBUG("IH: DMA trap\n");
+                       radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+                       break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: GUI idle\n");
                        break;
+               case 244: /* DMA trap event */
+                       DRM_DEBUG("IH: DMA1 trap\n");
+                       radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+                       break;
                default:
                        DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                        break;
@@ -3732,6 +3916,80 @@ restart_ih:
        return IRQ_HANDLED;
 }
 
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+               uint64_t src_offset, uint64_t dst_offset,
+               unsigned num_gpu_pages,
+               struct radeon_fence **fence)
+{
+       struct radeon_semaphore *sem = NULL;
+       int ring_index = rdev->asic->copy.dma_ring_index;
+       struct radeon_ring *ring = &rdev->ring[ring_index];
+       u32 size_in_bytes, cur_size_in_bytes;
+       int i, num_loops;
+       int r = 0;
+
+       r = radeon_semaphore_create(rdev, &sem);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               return r;
+       }
+
+       size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+       num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+       r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               radeon_semaphore_free(rdev, &sem, NULL);
+               return r;
+       }
+
+       if (radeon_fence_need_sync(*fence, ring->idx)) {
+               radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+                                           ring->idx);
+               radeon_fence_note_sync(*fence, ring->idx);
+       } else {
+               radeon_semaphore_free(rdev, &sem, NULL);
+       }
+
+       for (i = 0; i < num_loops; i++) {
+               cur_size_in_bytes = size_in_bytes;
+               if (cur_size_in_bytes > 0xFFFFF)
+                       cur_size_in_bytes = 0xFFFFF;
+               size_in_bytes -= cur_size_in_bytes;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+               radeon_ring_write(ring, dst_offset & 0xffffffff);
+               radeon_ring_write(ring, src_offset & 0xffffffff);
+               radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+               radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+               src_offset += cur_size_in_bytes;
+               dst_offset += cur_size_in_bytes;
+       }
+
+       r = radeon_fence_emit(rdev, fence, ring->idx);
+       if (r) {
+               radeon_ring_unlock_undo(rdev, ring);
+               return r;
+       }
+
+       radeon_ring_unlock_commit(rdev, ring);
+       radeon_semaphore_free(rdev, &sem, *fence);
+
+       return r;
+}
+
 /*
  * startup/shutdown callbacks
  */
@@ -3803,6 +4061,18 @@ static int si_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
+       r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = si_irq_init(rdev);
        if (r) {
@@ -3833,6 +4103,22 @@ static int si_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+                            DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+                            DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+       if (r)
+               return r;
+
+       ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+                            DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+                            DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+       if (r)
+               return r;
+
        r = si_cp_load_microcode(rdev);
        if (r)
                return r;
@@ -3840,6 +4126,10 @@ static int si_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = cayman_dma_resume(rdev);
+       if (r)
+               return r;
+
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3881,9 +4171,7 @@ int si_resume(struct radeon_device *rdev)
 int si_suspend(struct radeon_device *rdev)
 {
        si_cp_enable(rdev, false);
-       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
-       rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
-       rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+       cayman_dma_stop(rdev);
        si_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        si_pcie_gart_disable(rdev);
@@ -3961,6 +4249,14 @@ int si_init(struct radeon_device *rdev)
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 1024 * 1024);
 
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       ring->ring_obj = NULL;
+       r600_ring_init(rdev, ring, 64 * 1024);
+
+       ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+       ring->ring_obj = NULL;
+       r600_ring_init(rdev, ring, 64 * 1024);
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -3973,6 +4269,7 @@ int si_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                si_cp_fini(rdev);
+               cayman_dma_fini(rdev);
                si_irq_fini(rdev);
                si_rlc_fini(rdev);
                radeon_wb_fini(rdev);
@@ -4001,6 +4298,7 @@ void si_fini(struct radeon_device *rdev)
        r600_blit_fini(rdev);
 #endif
        si_cp_fini(rdev);
+       cayman_dma_fini(rdev);
        si_irq_fini(rdev);
        si_rlc_fini(rdev);
        radeon_wb_fini(rdev);
index 7d2a20e..62b4621 100644 (file)
 #define VM_CONTEXT0_CNTL                               0x1410
 #define                ENABLE_CONTEXT                                  (1 << 0)
 #define                PAGE_TABLE_DEPTH(x)                             (((x) & 3) << 1)
+#define                RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 3)
 #define                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 4)
+#define                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT    (1 << 6)
+#define                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT      (1 << 7)
+#define                PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT          (1 << 9)
+#define                PDE0_PROTECTION_FAULT_ENABLE_DEFAULT            (1 << 10)
+#define                VALID_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 12)
+#define                VALID_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 13)
+#define                READ_PROTECTION_FAULT_ENABLE_INTERRUPT          (1 << 15)
+#define                READ_PROTECTION_FAULT_ENABLE_DEFAULT            (1 << 16)
+#define                WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 18)
+#define                WRITE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 19)
 #define VM_CONTEXT1_CNTL                               0x1414
 #define VM_CONTEXT0_CNTL2                              0x1430
 #define VM_CONTEXT1_CNTL2                              0x1434
 #define        VM_CONTEXT14_PAGE_TABLE_BASE_ADDR               0x1450
 #define        VM_CONTEXT15_PAGE_TABLE_BASE_ADDR               0x1454
 
+#define        VM_CONTEXT1_PROTECTION_FAULT_ADDR               0x14FC
+#define        VM_CONTEXT1_PROTECTION_FAULT_STATUS             0x14DC
+
 #define VM_INVALIDATE_REQUEST                          0x1478
 #define VM_INVALIDATE_RESPONSE                         0x147c
 
 #       define RDERR_INT_ENABLE                         (1 << 0)
 #       define GUI_IDLE_INT_ENABLE                      (1 << 19)
 
+#define        CP_STRMOUT_CNTL                                 0x84FC
 #define        SCRATCH_REG0                                    0x8500
 #define        SCRATCH_REG1                                    0x8504
 #define        SCRATCH_REG2                                    0x8508
 #define        PACKET3_WAIT_REG_MEM                            0x3C
 #define        PACKET3_MEM_WRITE                               0x3D
 #define        PACKET3_COPY_DATA                               0x40
+#define        PACKET3_CP_DMA                                  0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - SRC_ADDR
+                * 1 - GDS
+                */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+                * 1 - PFP
+                */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+                * 1 - GDS
+                * 2 - DATA
+                */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#              define PACKET3_CP_DMA_CMD_RAW_WAIT  (1 << 30)
 #define        PACKET3_PFP_SYNC_ME                             0x42
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
 #define        PACKET3_WAIT_ON_AVAIL_BUFFER                    0x8A
 #define        PACKET3_SWITCH_BUFFER                           0x8B
 
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x800 /* not a register */
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_TILING_CONFIG                                0xd0b8
+
+#define DMA_PACKET(cmd, b, t, s, n)    ((((cmd) & 0xF) << 28) |        \
+                                        (((b) & 0x1) << 26) |          \
+                                        (((t) & 0x1) << 23) |          \
+                                        (((s) & 0x1) << 22) |          \
+                                        (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)    ((((cmd) & 0xF) << 28) |        \
+                                        (((vmid) & 0xF) << 20) |       \
+                                        (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n)          ((2 << 28) |                    \
+                                        (1 << 26) |                    \
+                                        (1 << 21) |                    \
+                                        (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define        DMA_PACKET_WRITE                                  0x2
+#define        DMA_PACKET_COPY                                   0x3
+#define        DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define        DMA_PACKET_SEMAPHORE                              0x5
+#define        DMA_PACKET_FENCE                                  0x6
+#define        DMA_PACKET_TRAP                                   0x7
+#define        DMA_PACKET_SRBM_WRITE                             0x9
+#define        DMA_PACKET_CONSTANT_FILL                          0xd
+#define        DMA_PACKET_NOP                                    0xf
+
 #endif
index 0e7a930..d917a41 100644 (file)
@@ -748,7 +748,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
        connector->encoder = encoder;
 
        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-       drm_connector_property_set_value(connector,
+       drm_object_property_set_value(&connector->base,
                sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
 
        return 0;
index 53b9852..0744103 100644 (file)
@@ -218,7 +218,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
        }
 
        bpp = crtc->fb->bits_per_pixel / 8;
-       win.stride = win.outw * bpp;
+       win.stride = crtc->fb->pitches[0];
 
        /* program window registers */
        value = tegra_dc_readl(dc, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -818,6 +818,7 @@ static int tegra_dc_remove(struct platform_device *pdev)
 }
 
 static struct of_device_id tegra_dc_of_match[] = {
+       { .compatible = "nvidia,tegra30-dc", },
        { .compatible = "nvidia,tegra20-dc", },
        { },
 };
index 58f55dc..ab40164 100644 (file)
@@ -1318,8 +1318,8 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
 }
 
 static struct of_device_id tegra_hdmi_of_match[] = {
-       { .compatible = "nvidia,tegra20-hdmi", },
        { .compatible = "nvidia,tegra30-hdmi", },
+       { .compatible = "nvidia,tegra20-hdmi", },
        { },
 };
 
index 1f728cd..bdb97a5 100644 (file)
@@ -68,6 +68,8 @@ static int host1x_parse_dt(struct host1x *host1x)
        static const char * const compat[] = {
                "nvidia,tegra20-dc",
                "nvidia,tegra20-hdmi",
+               "nvidia,tegra30-dc",
+               "nvidia,tegra30-hdmi",
        };
        unsigned int i;
        int err;
@@ -268,6 +270,7 @@ int host1x_unregister_client(struct host1x *host1x,
 }
 
 static struct of_device_id tegra_host1x_of_match[] = {
+       { .compatible = "nvidia,tegra30-host1x", },
        { .compatible = "nvidia,tegra20-host1x", },
        { },
 };
index 7426fe5..a915133 100644 (file)
@@ -366,7 +366,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible,
-                                 bool no_wait_reserve, bool no_wait_gpu)
+                                 bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -420,12 +420,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-               ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+               ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
-                                        no_wait_reserve, no_wait_gpu, mem);
+                                        no_wait_gpu, mem);
        else
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
 
        if (ret) {
                if (bdev->driver->move_notify) {
@@ -488,39 +488,33 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
        ttm_bo_mem_put(bo, &bo->mem);
 
        atomic_set(&bo->reserved, 0);
+       wake_up_all(&bo->event_queue);
 
        /*
-        * Make processes trying to reserve really pick it up.
+        * Since the final reference to this bo may not be dropped by
+        * the current task we have to put a memory barrier here to make
+        * sure the changes done in this function are always visible.
+        *
+        * This function only needs protection against the final kref_put.
         */
-       smp_mb__after_atomic_dec();
-       wake_up_all(&bo->event_queue);
+       smp_mb__before_atomic_dec();
 }
 
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
-       struct ttm_bo_driver *driver;
+       struct ttm_bo_driver *driver = bdev->driver;
        void *sync_obj = NULL;
        int put_count;
        int ret;
 
+       spin_lock(&glob->lru_lock);
+       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
        spin_lock(&bdev->fence_lock);
        (void) ttm_bo_wait(bo, false, false, true);
-       if (!bo->sync_obj) {
-
-               spin_lock(&glob->lru_lock);
-
-               /**
-                * Lock inversion between bo:reserve and bdev::fence_lock here,
-                * but that's OK, since we're only trylocking.
-                */
-
-               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-               if (unlikely(ret == -EBUSY))
-                       goto queue;
-
+       if (!ret && !bo->sync_obj) {
                spin_unlock(&bdev->fence_lock);
                put_count = ttm_bo_del_from_lru(bo);
 
@@ -530,18 +524,19 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                ttm_bo_list_ref_sub(bo, put_count, true);
 
                return;
-       } else {
-               spin_lock(&glob->lru_lock);
        }
-queue:
-       driver = bdev->driver;
        if (bo->sync_obj)
                sync_obj = driver->sync_obj_ref(bo->sync_obj);
+       spin_unlock(&bdev->fence_lock);
+
+       if (!ret) {
+               atomic_set(&bo->reserved, 0);
+               wake_up_all(&bo->event_queue);
+       }
 
        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
        spin_unlock(&glob->lru_lock);
-       spin_unlock(&bdev->fence_lock);
 
        if (sync_obj) {
                driver->sync_obj_flush(sync_obj);
@@ -552,68 +547,84 @@ queue:
 }
 
 /**
- * function ttm_bo_cleanup_refs
+ * function ttm_bo_cleanup_refs_and_unlock
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, do nothing.
  *
+ * Must be called with lru_lock and reservation held, this function
+ * will drop both before returning.
+ *
  * @interruptible         Any sleeps should occur interruptibly.
- * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
  * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
  */
 
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
-                              bool interruptible,
-                              bool no_wait_reserve,
-                              bool no_wait_gpu)
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+                                         bool interruptible,
+                                         bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_bo_global *glob = bo->glob;
        int put_count;
-       int ret = 0;
+       int ret;
 
-retry:
        spin_lock(&bdev->fence_lock);
-       ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-       spin_unlock(&bdev->fence_lock);
+       ret = ttm_bo_wait(bo, false, false, true);
 
-       if (unlikely(ret != 0))
-               return ret;
+       if (ret && !no_wait_gpu) {
+               void *sync_obj;
 
-retry_reserve:
-       spin_lock(&glob->lru_lock);
+               /*
+                * Take a reference to the fence and unreserve,
+                * at this point the buffer should be dead, so
+                * no new sync objects can be attached.
+                */
+               sync_obj = driver->sync_obj_ref(bo->sync_obj);
+               spin_unlock(&bdev->fence_lock);
 
-       if (unlikely(list_empty(&bo->ddestroy))) {
+               atomic_set(&bo->reserved, 0);
+               wake_up_all(&bo->event_queue);
                spin_unlock(&glob->lru_lock);
-               return 0;
-       }
-
-       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
-       if (unlikely(ret == -EBUSY)) {
-               spin_unlock(&glob->lru_lock);
-               if (likely(!no_wait_reserve))
-                       ret = ttm_bo_wait_unreserved(bo, interruptible);
-               if (unlikely(ret != 0))
+               ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+               driver->sync_obj_unref(&sync_obj);
+               if (ret)
                        return ret;
 
-               goto retry_reserve;
-       }
+               /*
+                * remove sync_obj with ttm_bo_wait, the wait should be
+                * finished, and no new wait object should have been added.
+                */
+               spin_lock(&bdev->fence_lock);
+               ret = ttm_bo_wait(bo, false, false, true);
+               WARN_ON(ret);
+               spin_unlock(&bdev->fence_lock);
+               if (ret)
+                       return ret;
 
-       BUG_ON(ret != 0);
+               spin_lock(&glob->lru_lock);
+               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
-       /**
-        * We can re-check for sync object without taking
-        * the bo::lock since setting the sync object requires
-        * also bo::reserved. A busy object at this point may
-        * be caused by another thread recently starting an accelerated
-        * eviction.
-        */
+               /*
+                * We raced, and lost, someone else holds the reservation now,
+                * and is probably busy in ttm_bo_cleanup_memtype_use.
+                *
+                * Even if it's not the case, because we finished waiting any
+                * delayed destruction would succeed, so just return success
+                * here.
+                */
+               if (ret) {
+                       spin_unlock(&glob->lru_lock);
+                       return 0;
+               }
+       } else
+               spin_unlock(&bdev->fence_lock);
 
-       if (unlikely(bo->sync_obj)) {
+       if (ret || unlikely(list_empty(&bo->ddestroy))) {
                atomic_set(&bo->reserved, 0);
                wake_up_all(&bo->event_queue);
                spin_unlock(&glob->lru_lock);
-               goto retry;
+               return ret;
        }
 
        put_count = ttm_bo_del_from_lru(bo);
@@ -656,9 +667,13 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                        kref_get(&nentry->list_kref);
                }
 
-               spin_unlock(&glob->lru_lock);
-               ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
-                                         !remove_all);
+               ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+               if (!ret)
+                       ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+                                                            !remove_all);
+               else
+                       spin_unlock(&glob->lru_lock);
+
                kref_put(&entry->list_kref, ttm_bo_release_list);
                entry = nentry;
 
@@ -696,6 +711,7 @@ static void ttm_bo_release(struct kref *kref)
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
+       write_lock(&bdev->vm_lock);
        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
@@ -707,18 +723,14 @@ static void ttm_bo_release(struct kref *kref)
        ttm_mem_io_unlock(man);
        ttm_bo_cleanup_refs_or_queue(bo);
        kref_put(&bo->list_kref, ttm_bo_release_list);
-       write_lock(&bdev->vm_lock);
 }
 
 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 {
        struct ttm_buffer_object *bo = *p_bo;
-       struct ttm_bo_device *bdev = bo->bdev;
 
        *p_bo = NULL;
-       write_lock(&bdev->vm_lock);
        kref_put(&bo->kref, ttm_bo_release);
-       write_unlock(&bdev->vm_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
@@ -737,7 +749,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-                       bool no_wait_reserve, bool no_wait_gpu)
+                       bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg evict_mem;
@@ -768,7 +780,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-                               no_wait_reserve, no_wait_gpu);
+                               no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -779,7 +791,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        }
 
        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-                                    no_wait_reserve, no_wait_gpu);
+                                    no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        pr_err("Buffer eviction failed\n");
@@ -793,49 +805,33 @@ out:
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                uint32_t mem_type,
-                               bool interruptible, bool no_wait_reserve,
+                               bool interruptible,
                                bool no_wait_gpu)
 {
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_buffer_object *bo;
-       int ret, put_count = 0;
+       int ret = -EBUSY, put_count;
 
-retry:
        spin_lock(&glob->lru_lock);
-       if (list_empty(&man->lru)) {
-               spin_unlock(&glob->lru_lock);
-               return -EBUSY;
+       list_for_each_entry(bo, &man->lru, lru) {
+               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+               if (!ret)
+                       break;
        }
 
-       bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
-       kref_get(&bo->list_kref);
-
-       if (!list_empty(&bo->ddestroy)) {
+       if (ret) {
                spin_unlock(&glob->lru_lock);
-               ret = ttm_bo_cleanup_refs(bo, interruptible,
-                                         no_wait_reserve, no_wait_gpu);
-               kref_put(&bo->list_kref, ttm_bo_release_list);
-
                return ret;
        }
 
-       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-       if (unlikely(ret == -EBUSY)) {
-               spin_unlock(&glob->lru_lock);
-               if (likely(!no_wait_reserve))
-                       ret = ttm_bo_wait_unreserved(bo, interruptible);
+       kref_get(&bo->list_kref);
 
+       if (!list_empty(&bo->ddestroy)) {
+               ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+                                                    no_wait_gpu);
                kref_put(&bo->list_kref, ttm_bo_release_list);
-
-               /**
-                * We *need* to retry after releasing the lru lock.
-                */
-
-               if (unlikely(ret != 0))
-                       return ret;
-               goto retry;
+               return ret;
        }
 
        put_count = ttm_bo_del_from_lru(bo);
@@ -845,7 +841,7 @@ retry:
 
        ttm_bo_list_ref_sub(bo, put_count, true);
 
-       ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
+       ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
        ttm_bo_unreserve(bo);
 
        kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -870,7 +866,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        struct ttm_placement *placement,
                                        struct ttm_mem_reg *mem,
                                        bool interruptible,
-                                       bool no_wait_reserve,
                                        bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -883,8 +878,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                        return ret;
                if (mem->mm_node)
                        break;
-               ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-                                               no_wait_reserve, no_wait_gpu);
+               ret = ttm_mem_evict_first(bdev, mem_type,
+                                         interruptible, no_wait_gpu);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
@@ -949,7 +944,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_mem_reg *mem,
-                       bool interruptible, bool no_wait_reserve,
+                       bool interruptible,
                        bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -1040,7 +1035,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                }
 
                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-                                               interruptible, no_wait_reserve, no_wait_gpu);
+                                               interruptible, no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
@@ -1055,7 +1050,7 @@ EXPORT_SYMBOL(ttm_bo_mem_space);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
-                       bool interruptible, bool no_wait_reserve,
+                       bool interruptible,
                        bool no_wait_gpu)
 {
        int ret = 0;
@@ -1082,10 +1077,12 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
        /*
         * Determine where to move the buffer.
         */
-       ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
+       ret = ttm_bo_mem_space(bo, placement, &mem,
+                              interruptible, no_wait_gpu);
        if (ret)
                goto out_unlock;
-       ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
+       ret = ttm_bo_handle_move_mem(bo, &mem, false,
+                                    interruptible, no_wait_gpu);
 out_unlock:
        if (ret && mem.mm_node)
                ttm_bo_mem_put(bo, &mem);
@@ -1114,7 +1111,7 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
-                       bool interruptible, bool no_wait_reserve,
+                       bool interruptible,
                        bool no_wait_gpu)
 {
        int ret;
@@ -1130,7 +1127,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
         */
        ret = ttm_bo_mem_compat(placement, &bo->mem);
        if (ret < 0) {
-               ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
+               ret = ttm_bo_move_buffer(bo, placement, interruptible,
+                                        no_wait_gpu);
                if (ret)
                        return ret;
        } else {
@@ -1243,7 +1241,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
                        goto out_err;
        }
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+       ret = ttm_bo_validate(bo, placement, interruptible, false);
        if (ret)
                goto out_err;
 
@@ -1329,7 +1327,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
        spin_lock(&glob->lru_lock);
        while (!list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
-               ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
+               ret = ttm_mem_evict_first(bdev, mem_type, false, false);
                if (ret) {
                        if (allow_errors) {
                                return ret;
@@ -1797,40 +1795,25 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
        uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
 
        spin_lock(&glob->lru_lock);
-       while (ret == -EBUSY) {
-               if (unlikely(list_empty(&glob->swap_lru))) {
-                       spin_unlock(&glob->lru_lock);
-                       return -EBUSY;
-               }
-
-               bo = list_first_entry(&glob->swap_lru,
-                                     struct ttm_buffer_object, swap);
-               kref_get(&bo->list_kref);
+       list_for_each_entry(bo, &glob->swap_lru, swap) {
+               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+               if (!ret)
+                       break;
+       }
 
-               if (!list_empty(&bo->ddestroy)) {
-                       spin_unlock(&glob->lru_lock);
-                       (void) ttm_bo_cleanup_refs(bo, false, false, false);
-                       kref_put(&bo->list_kref, ttm_bo_release_list);
-                       spin_lock(&glob->lru_lock);
-                       continue;
-               }
+       if (ret) {
+               spin_unlock(&glob->lru_lock);
+               return ret;
+       }
 
-               /**
-                * Reserve buffer. Since we unlock while sleeping, we need
-                * to re-check that nobody removed us from the swap-list while
-                * we slept.
-                */
+       kref_get(&bo->list_kref);
 
-               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-               if (unlikely(ret == -EBUSY)) {
-                       spin_unlock(&glob->lru_lock);
-                       ttm_bo_wait_unreserved(bo, false);
-                       kref_put(&bo->list_kref, ttm_bo_release_list);
-                       spin_lock(&glob->lru_lock);
-               }
+       if (!list_empty(&bo->ddestroy)) {
+               ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+               kref_put(&bo->list_kref, ttm_bo_release_list);
+               return ret;
        }
 
-       BUG_ON(ret != 0);
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);
 
@@ -1856,7 +1839,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                evict_mem.mem_type = TTM_PL_SYSTEM;
 
                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-                                            false, false, false);
+                                            false, false);
                if (unlikely(ret != 0))
                        goto out;
        }
index b9c4e51..9e9c5d2 100644 (file)
@@ -43,7 +43,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                   bool evict, bool no_wait_reserve,
+                   bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        struct ttm_tt *ttm = bo->ttm;
@@ -314,7 +314,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                      bool evict, bool no_wait_reserve, bool no_wait_gpu,
+                      bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -611,7 +611,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
 
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
-                             bool evict, bool no_wait_reserve,
+                             bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
 {
index 3ba72db..74705f3 100644 (file)
@@ -259,8 +259,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
                                 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
-       if (likely(bo != NULL))
-               ttm_bo_reference(bo);
+       if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
+               bo = NULL;
        read_unlock(&bdev->vm_lock);
 
        if (unlikely(bo == NULL)) {
index 1986d00..cd9e452 100644 (file)
@@ -213,8 +213,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
        driver = bdev->driver;
        glob = bo->glob;
 
-       spin_lock(&bdev->fence_lock);
        spin_lock(&glob->lru_lock);
+       spin_lock(&bdev->fence_lock);
 
        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
@@ -223,8 +223,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
                ttm_bo_unreserve_locked(bo);
                entry->reserved = false;
        }
-       spin_unlock(&glob->lru_lock);
        spin_unlock(&bdev->fence_lock);
+       spin_unlock(&glob->lru_lock);
 
        list_for_each_entry(entry, list, head) {
                if (entry->old_sync_obj)
index f18eeb4..58a5f32 100644 (file)
@@ -157,11 +157,11 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
        base->refcount_release = refcount_release;
        base->ref_obj_release = ref_obj_release;
        base->object_type = object_type;
-       spin_lock(&tdev->object_lock);
        kref_init(&base->refcount);
-       ret = drm_ht_just_insert_please(&tdev->object_hash,
-                                       &base->hash,
-                                       (unsigned long)base, 31, 0, 0);
+       spin_lock(&tdev->object_lock);
+       ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
+                                           &base->hash,
+                                           (unsigned long)base, 31, 0, 0);
        spin_unlock(&tdev->object_lock);
        if (unlikely(ret != 0))
                goto out_err0;
@@ -174,7 +174,9 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 
        return 0;
 out_err1:
-       (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+       spin_lock(&tdev->object_lock);
+       (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+       spin_unlock(&tdev->object_lock);
 out_err0:
        return ret;
 }
@@ -187,8 +189,15 @@ static void ttm_release_base(struct kref *kref)
        struct ttm_object_device *tdev = base->tfile->tdev;
 
        spin_lock(&tdev->object_lock);
-       (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+       (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
        spin_unlock(&tdev->object_lock);
+
+       /*
+        * Note: We don't use synchronize_rcu() here because it's far
+        * too slow. It's up to the user to free the object using
+        * call_rcu() or ttm_base_object_kfree().
+        */
+
        if (base->refcount_release) {
                ttm_object_file_unref(&base->tfile);
                base->refcount_release(&base);
@@ -214,7 +223,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
        int ret;
 
        rcu_read_lock();
-       ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+       ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
 
        if (likely(ret == 0)) {
                base = drm_hash_entry(hash, struct ttm_base_object, hash);
index 860dc48..bd2a3b4 100644 (file)
@@ -749,7 +749,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
                list_for_each_entry(p, &plist, lru) {
-                       clear_page(page_address(p));
+                       if (PageHighMem(p))
+                               clear_highpage(p);
+                       else
+                               clear_page(page_address(p));
                }
        }
 
index bf82601..7d759a4 100644 (file)
@@ -308,9 +308,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
                if (unlikely(to_page == NULL))
                        goto out_err;
 
-               preempt_disable();
                copy_highpage(to_page, from_page);
-               preempt_enable();
                page_cache_release(from_page);
        }
 
@@ -358,9 +356,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
                        ret = PTR_ERR(to_page);
                        goto out_err;
                }
-               preempt_disable();
                copy_highpage(to_page, from_page);
-               preempt_enable();
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                page_cache_release(to_page);
index 25b93de..512f44a 100644 (file)
@@ -140,7 +140,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
        drm_sysfs_connector_add(connector);
        drm_mode_connector_attach_encoder(connector, encoder);
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.dirty_info_property,
                                      1);
        return 0;
index 586869c..2cc6cd9 100644 (file)
@@ -5,6 +5,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
            vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
            vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
            vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-           vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
+           vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+           vmwgfx_surface.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
new file mode 100644 (file)
index 0000000..8369c3b
--- /dev/null
@@ -0,0 +1,909 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
+#define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at-most four active channels in a block:
+ *    1. Red, bump W, luminance and depth are stored in the first channel.
+ *    2. Green, bump V and stencil are stored in the second channel.
+ *    3. Blue and bump U are stored in the third channel.
+ *    4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ *    1. For compressed formats, only the data channel is used and its size
+ *       is equal to that of a singular block in the compression scheme.
+ *    2. For buffer formats, only the data channel is used and its size is
+ *       exactly one byte in length.
+ *    3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+       SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
+       SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with blue channel
+                                                   data */
+       SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel
+                                                   data */
+       SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video
+                                                   U and V */
+       SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel
+                                                   data */
+       SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel
+                                                   data */
+       SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil
+                                                   channel */
+       SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with red channel
+                                                   data */
+       SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel
+                                                   data */
+       SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel
+                                                   data */
+       SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance
+                                                   data */
+       SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
+       SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha
+                                                   channel */
+       SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel
+                                                   data */
+       SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of
+                                                   data */
+       SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of
+                                                   data depending on the
+                                                   compression method used */
+       SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
+                                                   floating point
+                                                   representation in
+                                                   all channels */
+       SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Three separate blocks store
+                                                   data. */
+       SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
+       SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
+       SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
+       SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
+       SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV,
+                                                   e.g., NV12. */
+       SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate
+                                                   Y, U, V, e.g., YV12. */
+
+       SVGA3DBLOCKDESC_RG         = SVGA3DBLOCKDESC_RED |
+       SVGA3DBLOCKDESC_GREEN,
+       SVGA3DBLOCKDESC_RGB        = SVGA3DBLOCKDESC_RG |
+       SVGA3DBLOCKDESC_BLUE,
+       SVGA3DBLOCKDESC_RGB_SRGB   = SVGA3DBLOCKDESC_RGB |
+       SVGA3DBLOCKDESC_SRGB,
+       SVGA3DBLOCKDESC_RGBA       = SVGA3DBLOCKDESC_RGB |
+       SVGA3DBLOCKDESC_ALPHA,
+       SVGA3DBLOCKDESC_RGBA_SRGB  = SVGA3DBLOCKDESC_RGBA |
+       SVGA3DBLOCKDESC_SRGB,
+       SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
+       SVGA3DBLOCKDESC_V,
+       SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
+       SVGA3DBLOCKDESC_LUMINANCE,
+       SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
+       SVGA3DBLOCKDESC_W,
+       SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
+       SVGA3DBLOCKDESC_ALPHA,
+       SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
+       SVGA3DBLOCKDESC_V |
+       SVGA3DBLOCKDESC_W |
+       SVGA3DBLOCKDESC_Q,
+       SVGA3DBLOCKDESC_LA         = SVGA3DBLOCKDESC_LUMINANCE |
+       SVGA3DBLOCKDESC_ALPHA,
+       SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
+       SVGA3DBLOCKDESC_IEEE_FP,
+       SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
+       SVGA3DBLOCKDESC_GREEN,
+       SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
+       SVGA3DBLOCKDESC_BLUE,
+       SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RGB_FP |
+       SVGA3DBLOCKDESC_ALPHA,
+       SVGA3DBLOCKDESC_DS         = SVGA3DBLOCKDESC_DEPTH |
+       SVGA3DBLOCKDESC_STENCIL,
+       SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_UV_VIDEO |
+       SVGA3DBLOCKDESC_Y,
+       SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
+       SVGA3DBLOCKDESC_Y |
+       SVGA3DBLOCKDESC_U_VIDEO |
+       SVGA3DBLOCKDESC_V_VIDEO,
+       SVGA3DBLOCKDESC_RGBE       = SVGA3DBLOCKDESC_RGB |
+       SVGA3DBLOCKDESC_EXP,
+       SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+       SVGA3DBLOCKDESC_SRGB,
+       SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+       SVGA3DBLOCKDESC_2PLANAR_YUV,
+       SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+       SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
+
+/*
+ * SVGA3dSurfaceDesc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ *    1. Block description.
+ *    2. Dimensions of a block in the surface.
+ *    3. Size of block in bytes.
+ *    4. Bit depth of the pixel data.
+ *    5. Channel bit depths and masks (if applicable).
+ */
+#define SVGA3D_CHANNEL_DEF(type)               \
+       struct {                                \
+               union {                         \
+                       type blue;              \
+                       type u;                 \
+                       type uv_video;          \
+                       type u_video;           \
+               };                              \
+               union {                         \
+                       type green;             \
+                       type v;                 \
+                       type stencil;           \
+                       type v_video;           \
+               };                              \
+               union {                         \
+                       type red;               \
+                       type w;                 \
+                       type luminance;         \
+                       type y;                 \
+                       type depth;             \
+                       type data;              \
+               };                              \
+               union {                         \
+                       type alpha;             \
+                       type q;                 \
+                       type exp;               \
+               };                              \
+       }
+
+struct svga3d_surface_desc {
+       enum svga3d_block_desc block_desc;
+       surf_size_struct block_size;
+       u32 bytes_per_block;
+       u32 pitch_bytes_per_block;
+
+       struct {
+               u32 total;
+               SVGA3D_CHANNEL_DEF(uint8);
+       } bit_depth;
+
+       struct {
+               SVGA3D_CHANNEL_DEF(uint8);
+       } bit_offset;
+};
+
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+       {SVGA3DBLOCKDESC_NONE,
+        {1, 1, 1},  0, 0, {0, {{0}, {0}, {0}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_FORMAT_INVALID */
+
+       {SVGA3DBLOCKDESC_RGB,
+        {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_X8R8G8B8 */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_A8R8G8B8 */
+
+       {SVGA3DBLOCKDESC_RGB,
+        {1, 1, 1},  2, 2, {16, {{5}, {6}, {5}, {0} } },
+        {{{0}, {5}, {11}, {0} } } },    /* SVGA3D_R5G6B5 */
+
+       {SVGA3DBLOCKDESC_RGB,
+        {1, 1, 1},  2, 2, {15, {{5}, {5}, {5}, {0} } },
+        {{{0}, {5}, {10}, {0} } } },    /* SVGA3D_X1R5G5B5 */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  2, 2, {16, {{5}, {5}, {5}, {1} } },
+        {{{0}, {5}, {10}, {15} } } },   /* SVGA3D_A1R5G5B5 */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  2, 2, {16, {{4}, {4}, {4}, {4} } },
+        {{{0}, {4}, {8}, {12} } } },    /* SVGA3D_A4R4G4B4 */
+
+       {SVGA3DBLOCKDESC_DEPTH,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D32 */
+
+       {SVGA3DBLOCKDESC_DEPTH,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D16 */
+
+       {SVGA3DBLOCKDESC_DS,
+        {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8 */
+
+       {SVGA3DBLOCKDESC_DS,
+        {1, 1, 1},  2, 2, {16, {{0}, {1}, {15}, {0} } },
+        {{{0}, {15}, {0}, {0} } } },    /* SVGA3D_Z_D15S1 */
+
+       {SVGA3DBLOCKDESC_LUMINANCE,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE8 */
+
+       {SVGA3DBLOCKDESC_LA,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {4}, {4} } },
+        {{{0}, {0}, {0}, {4} } } },     /* SVGA3D_LUMINANCE4_ALPHA4 */
+
+       {SVGA3DBLOCKDESC_LUMINANCE,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE16 */
+
+       {SVGA3DBLOCKDESC_LA,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
+        {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_LUMINANCE8_ALPHA8 */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT1 */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT2 */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT3 */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT4 */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT5 */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
+        {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_BUMPU8V8 */
+
+       {SVGA3DBLOCKDESC_UVL,
+        {1, 1, 1},  2, 2, {16, {{5}, {5}, {6}, {0} } },
+        {{{11}, {6}, {0}, {0} } } },    /* SVGA3D_BUMPL6V5U5 */
+
+       {SVGA3DBLOCKDESC_UVL,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {0} } },
+        {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPX8L8V8U8 */
+
+       {SVGA3DBLOCKDESC_UVL,
+        {1, 1, 1},  3, 3, {24, {{8}, {8}, {8}, {0} } },
+        {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPL8V8U8 */
+
+       {SVGA3DBLOCKDESC_RGBA_FP,
+        {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+        {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_ARGB_S10E5 */
+
+       {SVGA3DBLOCKDESC_RGBA_FP,
+        {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+        {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_ARGB_S23E8 */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+        {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2R10G10B10 */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
+        {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_V8U8 */
+
+       {SVGA3DBLOCKDESC_UVWQ,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{24}, {16}, {8}, {0} } } },   /* SVGA3D_Q8W8V8U8 */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
+        {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_CxV8U8 */
+
+       {SVGA3DBLOCKDESC_UVL,
+        {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+        {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_X8L8V8U8 */
+
+       {SVGA3DBLOCKDESC_UVWA,
+        {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+        {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2W10V10U10 */
+
+       {SVGA3DBLOCKDESC_ALPHA,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {0}, {8} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_ALPHA8 */
+
+       {SVGA3DBLOCKDESC_R_FP,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S10E5 */
+
+       {SVGA3DBLOCKDESC_R_FP,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S23E8 */
+
+       {SVGA3DBLOCKDESC_RG_FP,
+        {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+        {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_RG_S10E5 */
+
+       {SVGA3DBLOCKDESC_RG_FP,
+        {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_RG_S23E8 */
+
+       {SVGA3DBLOCKDESC_BUFFER,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BUFFER */
+
+       {SVGA3DBLOCKDESC_DEPTH,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24X8 */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  4, 4, {32, {{16}, {16}, {0}, {0} } },
+        {{{16}, {0}, {0}, {0} } } },    /* SVGA3D_V16U16 */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+        {{{0}, {0}, {16}, {0} } } },    /* SVGA3D_G16R16 */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+        {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_A16B16G16R16 */
+
+       {SVGA3DBLOCKDESC_YUV,
+        {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
+        {{{0}, {0}, {8}, {0} } } },     /* SVGA3D_UYVY */
+
+       {SVGA3DBLOCKDESC_YUV,
+        {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
+        {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_YUY2 */
+
+       {SVGA3DBLOCKDESC_NV12,
+        {2, 2, 1},  6, 2, {48, {{0}, {0}, {48}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_NV12 */
+
+       {SVGA3DBLOCKDESC_AYUV,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_AYUV */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+        {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+        {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_UINT */
+
+       {SVGA3DBLOCKDESC_UVWQ,
+        {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+        {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_SINT */
+
+       {SVGA3DBLOCKDESC_RGB,
+        {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+        {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGB_FP,
+        {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+        {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_FLOAT */
+
+       {SVGA3DBLOCKDESC_RGB,
+        {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+        {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_UINT */
+
+       {SVGA3DBLOCKDESC_UVW,
+        {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+        {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_SINT */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+        {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+        {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_UINT */
+
+       {SVGA3DBLOCKDESC_UVWQ,
+        {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+        {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SNORM */
+
+       {SVGA3DBLOCKDESC_UVWQ,
+        {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+        {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SINT */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_UINT */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_SINT */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G8X24_TYPELESS */
+
+       {SVGA3DBLOCKDESC_DS,
+        {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_D32_FLOAT_S8X24_UINT */
+
+       {SVGA3DBLOCKDESC_R_FP,
+        {1, 1, 1},  8, 8, {64, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },    /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
+
+       {SVGA3DBLOCKDESC_GREEN,
+        {1, 1, 1},  8, 8, {64, {{0}, {8}, {0}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_X32_TYPELESS_G8X24_UINT */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+        {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+        {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_UINT */
+
+       {SVGA3DBLOCKDESC_RGB_FP,
+        {1, 1, 1},  4, 4, {32, {{10}, {11}, {11}, {0} } },
+        {{{0}, {10}, {21}, {0} } } },  /* SVGA3D_R11G11B10_FLOAT */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM */
+
+       {SVGA3DBLOCKDESC_RGBA_SRGB,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM_SRGB */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UINT */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_SINT */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+        {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RG_FP,
+        {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+        {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_UINT */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+        {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_SINT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_TYPELESS */
+
+       {SVGA3DBLOCKDESC_DEPTH,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_D32_FLOAT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_UINT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_SINT */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_R24G8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_DS,
+        {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_D24_UNORM_S8_UINT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R24_UNORM_X8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_GREEN,
+        {1, 1, 1},  4, 4, {32, {{0}, {8}, {0}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_X24_TYPELESS_G8_UINT */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+        {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+        {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UNORM */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+        {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UINT */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+        {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_SINT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UNORM */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UINT */
+
+       {SVGA3DBLOCKDESC_U,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SNORM */
+
+       {SVGA3DBLOCKDESC_U,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SINT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UNORM */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UINT */
+
+       {SVGA3DBLOCKDESC_U,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SNORM */
+
+       {SVGA3DBLOCKDESC_U,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SINT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {8, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R1_UNORM */
+
+       {SVGA3DBLOCKDESC_RGBE,
+        {1, 1, 1},  4, 4, {32, {{9}, {9}, {9}, {5} } },
+        {{{18}, {9}, {0}, {27} } } },   /* SVGA3D_R9G9B9E5_SHAREDEXP */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+        {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_B8G8_UNORM */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+        {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_G8R8_G8B8_UNORM */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_TYPELESS */
+
+       {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+        {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_UNORM_SRGB */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_TYPELESS */
+
+       {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_UNORM_SRGB */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_TYPELESS */
+
+       {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_UNORM_SRGB */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_TYPELESS */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_UNORM */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_SNORM */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_TYPELESS */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_UNORM */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_SNORM */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+        {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGBA_SRGB,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_UNORM_SRGB */
+
+       {SVGA3DBLOCKDESC_RGB,
+        {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGB_SRGB,
+        {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_UNORM_SRGB */
+
+       {SVGA3DBLOCKDESC_DEPTH,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_DF16 */
+
+       {SVGA3DBLOCKDESC_DS,
+        {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_DF24 */
+
+       {SVGA3DBLOCKDESC_DS,
+        {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8_INT */
+};
+
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+       uint64_t tmp = (uint64_t) a*b;
+       return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+       if (format < ARRAY_SIZE(svga3d_surface_descs))
+               return &svga3d_surface_descs[format];
+
+       return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ *      Given a base level size and the mip level, compute the size of
+ *      the mip level.
+ *
+ * Results:
+ *      See above.
+ *
+ * Side effects:
+ *      None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+       surf_size_struct size;
+
+       size.width = max_t(u32, base_level.width >> mip_level, 1);
+       size.height = max_t(u32, base_level.height >> mip_level, 1);
+       size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+       return size;
+}
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+                                const surf_size_struct *pixel_size,
+                                surf_size_struct *block_size)
+{
+       block_size->width = DIV_ROUND_UP(pixel_size->width,
+                                        desc->block_size.width);
+       block_size->height = DIV_ROUND_UP(pixel_size->height,
+                                         desc->block_size.height);
+       block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+                                        desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+       return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+                             const surf_size_struct *size)
+{
+       u32 pitch;
+       surf_size_struct blocks;
+
+       svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+       pitch = blocks.width * desc->pitch_bytes_per_block;
+
+       return pitch;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ *      Return the number of bytes of buffer space required to store
+ *      one image of a surface, optionally using the specified pitch.
+ *
+ *      If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ *      This function is overflow-safe. If the result would have
+ *      overflowed, instead we return MAX_UINT32.
+ *
+ * Results:
+ *      Byte count.
+ *
+ * Side effects:
+ *      None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+                                   const surf_size_struct *size,
+                                   u32 pitch)
+{
+       surf_size_struct image_blocks;
+       u32 slice_size, total_size;
+
+       svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+       if (svga3dsurface_is_planar_surface(desc)) {
+               total_size = clamped_umul32(image_blocks.width,
+                                           image_blocks.height);
+               total_size = clamped_umul32(total_size, image_blocks.depth);
+               total_size = clamped_umul32(total_size, desc->bytes_per_block);
+               return total_size;
+       }
+
+       if (pitch == 0)
+               pitch = svga3dsurface_calculate_pitch(desc, size);
+
+       slice_size = clamped_umul32(image_blocks.height, pitch);
+       total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+       return total_size;
+}
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+                                 surf_size_struct base_level_size,
+                                 u32 num_mip_levels,
+                                 bool cubemap)
+{
+       const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+       u32 total_size = 0;
+       u32 mip;
+
+       for (mip = 0; mip < num_mip_levels; mip++) {
+               surf_size_struct size =
+                       svga3dsurface_get_mip_size(base_level_size, mip);
+               total_size += svga3dsurface_get_image_buffer_size(desc,
+                                                                 &size, 0);
+       }
+
+       if (cubemap)
+               total_size *= SVGA3D_MAX_SURFACE_FACES;
+
+       return total_size;
+}
+
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @width: The image width in pixels.
+ * @height: The image height in pixels
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+                              u32 width, u32 height,
+                              u32 x, u32 y, u32 z)
+{
+       const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+       const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+       const u32 bd = desc->block_size.depth;
+       const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+       const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+       const u32 offset = (z / bd * imgstride +
+                           y / bh * rowstride +
+                           x / bw * desc->bytes_per_block);
+       return offset;
+}
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+                              surf_size_struct baseLevelSize,
+                              u32 numMipLevels,
+                              u32 face,
+                              u32 mip)
+
+{
+       u32 offset;
+       u32 mipChainBytes;
+       u32 mipChainBytesToLevel;
+       u32 i;
+       const struct svga3d_surface_desc *desc;
+       surf_size_struct mipSize;
+       u32 bytes;
+
+       desc = svga3dsurface_get_desc(format);
+
+       mipChainBytes = 0;
+       mipChainBytesToLevel = 0;
+       for (i = 0; i < numMipLevels; i++) {
+               mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+               bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+               mipChainBytes += bytes;
+               if (i < mip)
+                       mipChainBytesToLevel += bytes;
+       }
+
+       offset = mipChainBytes * face + mipChainBytesToLevel;
+
+       return offset;
+}
index ef1109c..96dc84d 100644 (file)
@@ -248,13 +248,12 @@ void vmw_evict_flags(struct ttm_buffer_object *bo,
        *placement = vmw_sys_placement;
 }
 
-/**
- * FIXME: Proper access checks on buffers.
- */
-
 static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-       return 0;
+       struct ttm_object_file *tfile =
+               vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
+
+       return vmw_user_dmabuf_verify_access(bo, tfile);
 }
 
 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
new file mode 100644 (file)
index 0000000..00ae092
--- /dev/null
@@ -0,0 +1,274 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+struct vmw_user_context {
+       struct ttm_base_object base;
+       struct vmw_resource res;
+};
+
+static void vmw_user_context_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base);
+
+static uint64_t vmw_user_context_size;
+
+static const struct vmw_user_resource_conv user_context_conv = {
+       .object_type = VMW_RES_CONTEXT,
+       .base_obj_to_res = vmw_user_context_base_to_res,
+       .res_free = vmw_user_context_free
+};
+
+const struct vmw_user_resource_conv *user_context_converter =
+       &user_context_conv;
+
+
+static const struct vmw_res_func vmw_legacy_context_func = {
+       .res_type = vmw_res_context,
+       .needs_backup = false,
+       .may_evict = false,
+       .type_name = "legacy contexts",
+       .backup_placement = NULL,
+       .create = NULL,
+       .destroy = NULL,
+       .bind = NULL,
+       .unbind = NULL
+};
+
+/**
+ * Context management:
+ */
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDestroyContext body;
+       } *cmd;
+
+
+       vmw_execbuf_release_pinned_bo(dev_priv);
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "destruction.\n");
+               return;
+       }
+
+       cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
+       cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+       cmd->body.cid = cpu_to_le32(res->id);
+
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       vmw_3d_resource_dec(dev_priv, false);
+}
+
+static int vmw_context_init(struct vmw_private *dev_priv,
+                           struct vmw_resource *res,
+                           void (*res_free) (struct vmw_resource *res))
+{
+       int ret;
+
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDefineContext body;
+       } *cmd;
+
+       ret = vmw_resource_init(dev_priv, res, false,
+                               res_free, &vmw_legacy_context_func);
+
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed to allocate a resource id.\n");
+               goto out_early;
+       }
+
+       if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+               DRM_ERROR("Out of hw context ids.\n");
+               vmw_resource_unreference(&res);
+               return -ENOMEM;
+       }
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Fifo reserve failed.\n");
+               vmw_resource_unreference(&res);
+               return -ENOMEM;
+       }
+
+       cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
+       cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+       cmd->body.cid = cpu_to_le32(res->id);
+
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       (void) vmw_3d_resource_inc(dev_priv, false);
+       vmw_resource_activate(res, vmw_hw_context_destroy);
+       return 0;
+
+out_early:
+       if (res_free == NULL)
+               kfree(res);
+       else
+               res_free(res);
+       return ret;
+}
+
+struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+{
+       struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+       int ret;
+
+       if (unlikely(res == NULL))
+               return NULL;
+
+       ret = vmw_context_init(dev_priv, res, NULL);
+
+       return (ret == 0) ? res : NULL;
+}
+
+/**
+ * User-space context management:
+ */
+
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base)
+{
+       return &(container_of(base, struct vmw_user_context, base)->res);
+}
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+       struct vmw_user_context *ctx =
+           container_of(res, struct vmw_user_context, res);
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       ttm_base_object_kfree(ctx, base);
+       ttm_mem_global_free(vmw_mem_glob(dev_priv),
+                           vmw_user_context_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+       struct ttm_base_object *base = *p_base;
+       struct vmw_user_context *ctx =
+           container_of(base, struct vmw_user_context, base);
+       struct vmw_resource *res = &ctx->res;
+
+       *p_base = NULL;
+       vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+{
+       struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+       return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+}
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_user_context *ctx;
+       struct vmw_resource *res;
+       struct vmw_resource *tmp;
+       struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
+       int ret;
+
+
+       /*
+        * Approximate idr memory usage with 128 bytes. It will be limited
+        * by maximum number of contexts anyway.
+        */
+
+       if (unlikely(vmw_user_context_size == 0))
+               vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+                                  vmw_user_context_size,
+                                  false, true);
+       if (unlikely(ret != 0)) {
+               if (ret != -ERESTARTSYS)
+                       DRM_ERROR("Out of graphics memory for context"
+                                 " creation.\n");
+               goto out_unlock;
+       }
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (unlikely(ctx == NULL)) {
+               ttm_mem_global_free(vmw_mem_glob(dev_priv),
+                                   vmw_user_context_size);
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
+
+       res = &ctx->res;
+       ctx->base.shareable = false;
+       ctx->base.tfile = NULL;
+
+       /*
+        * From here on, the destructor takes over resource freeing.
+        */
+
+       ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+       if (unlikely(ret != 0))
+               goto out_unlock;
+
+       tmp = vmw_resource_reference(&ctx->res);
+       ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+                                  &vmw_user_context_base_release, NULL);
+
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&tmp);
+               goto out_err;
+       }
+
+       arg->cid = ctx->base.hash.key;
+out_err:
+       vmw_resource_unreference(&res);
+out_unlock:
+       ttm_read_unlock(&vmaster->lock);
+       return ret;
+
+}
index bd78257..5fae06a 100644 (file)
@@ -60,13 +60,13 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+       vmw_execbuf_release_pinned_bo(dev_priv);
 
        ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
        if (unlikely(ret != 0))
                goto err;
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+       ret = ttm_bo_validate(bo, placement, interruptible, false);
 
        ttm_bo_unreserve(bo);
 
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
                return ret;
 
        if (pin)
-               vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+               vmw_execbuf_release_pinned_bo(dev_priv);
 
        ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
        if (unlikely(ret != 0))
@@ -123,7 +123,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
        else
                placement = &vmw_vram_gmr_placement;
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+       ret = ttm_bo_validate(bo, placement, interruptible, false);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto err_unreserve;
 
@@ -138,7 +138,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
        else
                placement = &vmw_vram_placement;
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+       ret = ttm_bo_validate(bo, placement, interruptible, false);
 
 err_unreserve:
        ttm_bo_unreserve(bo);
@@ -214,8 +214,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
                return ret;
 
        if (pin)
-               vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
-
+               vmw_execbuf_release_pinned_bo(dev_priv);
        ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
        if (unlikely(ret != 0))
                goto err_unlock;
@@ -224,10 +223,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
            bo->mem.start > 0)
-               (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
-                                      false, false);
+               (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
 
-       ret = ttm_bo_validate(bo, &placement, interruptible, false, false);
+       ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
        /* For some reason we didn't up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);
@@ -306,7 +304,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
 
        BUG_ON(!ttm_bo_is_reserved(bo));
        BUG_ON(old_mem_type != TTM_PL_VRAM &&
-              old_mem_type != VMW_PL_FLAG_GMR);
+              old_mem_type != VMW_PL_GMR);
 
        pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
        if (pin)
@@ -316,7 +314,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
        placement.num_placement = 1;
        placement.placement = &pl_flags;
 
-       ret = ttm_bo_validate(bo, &placement, false, true, true);
+       ret = ttm_bo_validate(bo, &placement, false, true);
 
        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
 }
index 56973cd..161f8b2 100644 (file)
@@ -432,6 +432,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;
+       enum vmw_res_type i;
 
        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
@@ -448,15 +449,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
-       idr_init(&dev_priv->context_idr);
-       idr_init(&dev_priv->surface_idr);
-       idr_init(&dev_priv->stream_idr);
+
+       for (i = vmw_res_context; i < vmw_res_max; ++i) {
+               idr_init(&dev_priv->res_idr[i]);
+               INIT_LIST_HEAD(&dev_priv->res_lru[i]);
+       }
+
        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        atomic_set(&dev_priv->fifo_queue_waiters, 0);
-       INIT_LIST_HEAD(&dev_priv->surface_lru);
+
        dev_priv->used_memory_size = 0;
 
        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
@@ -670,9 +674,9 @@ out_err2:
 out_err1:
        vmw_ttm_global_release(dev_priv);
 out_err0:
-       idr_destroy(&dev_priv->surface_idr);
-       idr_destroy(&dev_priv->context_idr);
-       idr_destroy(&dev_priv->stream_idr);
+       for (i = vmw_res_context; i < vmw_res_max; ++i)
+               idr_destroy(&dev_priv->res_idr[i]);
+
        kfree(dev_priv);
        return ret;
 }
@@ -680,9 +684,12 @@ out_err0:
 static int vmw_driver_unload(struct drm_device *dev)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
+       enum vmw_res_type i;
 
        unregister_pm_notifier(&dev_priv->pm_nb);
 
+       if (dev_priv->ctx.res_ht_initialized)
+               drm_ht_remove(&dev_priv->ctx.res_ht);
        if (dev_priv->ctx.cmd_bounce)
                vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->enable_fb) {
@@ -709,9 +716,9 @@ static int vmw_driver_unload(struct drm_device *dev)
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
-       idr_destroy(&dev_priv->surface_idr);
-       idr_destroy(&dev_priv->context_idr);
-       idr_destroy(&dev_priv->stream_idr);
+
+       for (i = vmw_res_context; i < vmw_res_max; ++i)
+               idr_destroy(&dev_priv->res_idr[i]);
 
        kfree(dev_priv);
 
@@ -935,7 +942,7 @@ static void vmw_master_drop(struct drm_device *dev,
 
        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-       vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+       vmw_execbuf_release_pinned_bo(dev_priv);
 
        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -987,7 +994,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents is moved to swappable memory.
                 */
-               vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+               vmw_execbuf_release_pinned_bo(dev_priv);
+               vmw_resource_evict_all(dev_priv);
                ttm_bo_swapout_all(&dev_priv->bdev);
 
                break;
@@ -1084,6 +1092,11 @@ static void vmw_pm_complete(struct device *kdev)
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
 
+       mutex_lock(&dev_priv->hw_mutex);
+       vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+       (void) vmw_read(dev_priv, SVGA_REG_ID);
+       mutex_unlock(&dev_priv->hw_mutex);
+
        /**
         * Reclaim 3d reference held by fbdev and potentially
         * start fifo.
index 7c6f6e3..13aeda7 100644 (file)
@@ -67,31 +67,46 @@ struct vmw_fpriv {
 
 struct vmw_dma_buffer {
        struct ttm_buffer_object base;
-       struct list_head validate_list;
-       bool gmr_bound;
-       uint32_t cur_validate_node;
-       bool on_validate_list;
+       struct list_head res_list;
 };
 
+/**
+ * struct vmw_validate_buffer - Carries validation info about buffers.
+ *
+ * @base: Validation info for TTM.
+ * @hash: Hash entry for quick lookup of the TTM buffer object.
+ *
+ * This structure contains also driver private validation info
+ * on top of the info needed by TTM.
+ */
+struct vmw_validate_buffer {
+       struct ttm_validate_buffer base;
+       struct drm_hash_item hash;
+};
+
+struct vmw_res_func;
 struct vmw_resource {
        struct kref kref;
        struct vmw_private *dev_priv;
-       struct idr *idr;
        int id;
-       enum ttm_object_type res_type;
        bool avail;
-       void (*remove_from_lists) (struct vmw_resource *res);
-       void (*hw_destroy) (struct vmw_resource *res);
+       unsigned long backup_size;
+       bool res_dirty; /* Protected by backup buffer reserved */
+       bool backup_dirty; /* Protected by backup buffer reserved */
+       struct vmw_dma_buffer *backup;
+       unsigned long backup_offset;
+       const struct vmw_res_func *func;
+       struct list_head lru_head; /* Protected by the resource lock */
+       struct list_head mob_head; /* Protected by @backup reserved */
        void (*res_free) (struct vmw_resource *res);
-       struct list_head validate_head;
-       struct list_head query_head; /* Protected by the cmdbuf mutex */
-       /* TODO is a generic snooper needed? */
-#if 0
-       void (*snoop)(struct vmw_resource *res,
-                     struct ttm_object_file *tfile,
-                     SVGA3dCmdHeader *header);
-       void *snoop_priv;
-#endif
+       void (*hw_destroy) (struct vmw_resource *res);
+};
+
+enum vmw_res_type {
+       vmw_res_context,
+       vmw_res_surface,
+       vmw_res_stream,
+       vmw_res_max
 };
 
 struct vmw_cursor_snooper {
@@ -105,20 +120,18 @@ struct vmw_surface_offset;
 
 struct vmw_surface {
        struct vmw_resource res;
-       struct list_head lru_head; /* Protected by the resource lock */
        uint32_t flags;
        uint32_t format;
        uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+       struct drm_vmw_size base_size;
        struct drm_vmw_size *sizes;
        uint32_t num_sizes;
-
        bool scanout;
-
        /* TODO so far just a extra pointer */
        struct vmw_cursor_snooper snooper;
-       struct ttm_buffer_object *backup;
        struct vmw_surface_offset *offsets;
-       uint32_t backup_size;
+       SVGA3dTextureFilter autogen_filter;
+       uint32_t multisample_count;
 };
 
 struct vmw_marker_queue {
@@ -145,29 +158,46 @@ struct vmw_relocation {
        uint32_t index;
 };
 
+/**
+ * struct vmw_res_cache_entry - resource information cache entry
+ *
+ * @valid: Whether the entry is valid, which also implies that the execbuf
+ * code holds a reference to the resource, and it's placed on the
+ * validation list.
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ *
+ * Used to avoid frequent repeated user-space handle lookups of the
+ * same resource.
+ */
+struct vmw_res_cache_entry {
+       bool valid;
+       uint32_t handle;
+       struct vmw_resource *res;
+       struct vmw_resource_val_node *node;
+};
+
 struct vmw_sw_context{
-       struct ida bo_list;
-       uint32_t last_cid;
-       bool cid_valid;
+       struct drm_open_hash res_ht;
+       bool res_ht_initialized;
        bool kernel; /**< is the called made from the kernel */
-       struct vmw_resource *cur_ctx;
-       uint32_t last_sid;
-       uint32_t sid_translation;
-       bool sid_valid;
        struct ttm_object_file *tfile;
        struct list_head validate_nodes;
        struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
        uint32_t cur_reloc;
-       struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
+       struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
        uint32_t cur_val_buf;
        uint32_t *cmd_bounce;
        uint32_t cmd_bounce_size;
        struct list_head resource_list;
        uint32_t fence_flags;
-       struct list_head query_list;
        struct ttm_buffer_object *cur_query_bo;
-       uint32_t cur_query_cid;
-       bool query_cid_valid;
+       struct list_head res_relocations;
+       uint32_t *buf_start;
+       struct vmw_res_cache_entry res_cache[vmw_res_max];
+       struct vmw_resource *last_query_ctx;
+       bool needs_post_query_barrier;
+       struct vmw_resource *error_resource;
 };
 
 struct vmw_legacy_display;
@@ -242,10 +272,7 @@ struct vmw_private {
         */
 
        rwlock_t resource_lock;
-       struct idr context_idr;
-       struct idr surface_idr;
-       struct idr stream_idr;
-
+       struct idr res_idr[vmw_res_max];
        /*
         * Block lastclose from racing with firstopen.
         */
@@ -320,6 +347,7 @@ struct vmw_private {
        struct ttm_buffer_object *dummy_query_bo;
        struct ttm_buffer_object *pinned_bo;
        uint32_t query_cid;
+       uint32_t query_cid_valid;
        bool dummy_query_bo_pinned;
 
        /*
@@ -329,10 +357,15 @@ struct vmw_private {
         * protected by the cmdbuf mutex for simplicity.
         */
 
-       struct list_head surface_lru;
+       struct list_head res_lru[vmw_res_max];
        uint32_t used_memory_size;
 };
 
+static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
+{
+       return container_of(res, struct vmw_surface, res);
+}
+
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
 {
        return (struct vmw_private *)dev->dev_private;
@@ -381,10 +414,16 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 /**
  * Resource utilities - vmwgfx_resource.c
  */
+struct vmw_user_resource_conv;
+extern const struct vmw_user_resource_conv *user_surface_converter;
+extern const struct vmw_user_resource_conv *user_context_converter;
 
 extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
@@ -398,14 +437,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                                  uint32_t handle,
                                  struct vmw_surface **out_surf,
                                  struct vmw_dma_buffer **out_buf);
+extern int vmw_user_resource_lookup_handle(
+       struct vmw_private *dev_priv,
+       struct ttm_object_file *tfile,
+       uint32_t handle,
+       const struct vmw_user_resource_conv *converter,
+       struct vmw_resource **p_res);
 extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_init(struct vmw_private *dev_priv,
-                           struct vmw_surface *srf,
-                           void (*res_free) (struct vmw_resource *res));
-extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
-                                         struct ttm_object_file *tfile,
-                                         uint32_t handle,
-                                         struct vmw_surface **out);
 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -423,6 +461,8 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
                           size_t size, struct ttm_placement *placement,
                           bool interuptable,
                           void (*bo_free) (struct ttm_buffer_object *bo));
+extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+                                 struct ttm_object_file *tfile);
 extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
@@ -440,7 +480,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t *inout_id,
                                  struct vmw_resource **out);
-extern void vmw_resource_unreserve(struct list_head *list);
+extern void vmw_resource_unreserve(struct vmw_resource *res,
+                                  struct vmw_dma_buffer *new_backup,
+                                  unsigned long new_backup_offset);
+extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+                                    struct ttm_mem_reg *mem);
+extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+                               struct vmw_fence_obj *fence);
+extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 
 /**
  * DMA buffer helper routines - vmwgfx_dmabuf.c
@@ -538,10 +585,9 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
                               struct drm_vmw_fence_rep __user
                               *user_fence_rep,
                               struct vmw_fence_obj **out_fence);
-
-extern void
-vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
-                             bool only_on_cid_match, uint32_t cid);
+extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+                                           struct vmw_fence_obj *fence);
+extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
 
 extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
                                      struct vmw_private *dev_priv,
index e5775a0..394e647 100644 (file)
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_placement.h>
 
+#define VMW_RES_HT_ORDER 12
+
+/**
+ * struct vmw_resource_relocation - Relocation info for resources
+ *
+ * @head: List head for the software context's relocation list.
+ * @res: Non-ref-counted pointer to the resource.
+ * @offset: Offset of 4 byte entries into the command buffer where the
+ * id that needs fixup is located.
+ */
+struct vmw_resource_relocation {
+       struct list_head head;
+       const struct vmw_resource *res;
+       unsigned long offset;
+};
+
+/**
+ * struct vmw_resource_val_node - Validation info for resources
+ *
+ * @head: List head for the software context's resource list.
+ * @hash: Hash entry for quick resource to val_node lookup.
+ * @res: Ref-counted pointer to the resource.
+ * @switch_backup: Boolean whether to switch backup buffer on unreserve.
+ * @new_backup: Refcounted pointer to the new backup buffer.
+ * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
+ * @first_usage: Set to true the first time the resource is referenced in
+ * the command stream.
+ * @no_buffer_needed: Resources do not need to allocate buffer backup on
+ * reservation. The command stream will provide one.
+ */
+struct vmw_resource_val_node {
+       struct list_head head;
+       struct drm_hash_item hash;
+       struct vmw_resource *res;
+       struct vmw_dma_buffer *new_backup;
+       unsigned long new_backup_offset;
+       bool first_usage;
+       bool no_buffer_needed;
+};
+
+/**
+ * vmw_resource_list_unreserve - unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list: list of resources to unreserve.
+ * @backoff: Whether command submission failed.
+ */
+static void vmw_resource_list_unreserve(struct list_head *list,
+                                       bool backoff)
+{
+       struct vmw_resource_val_node *val;
+
+       list_for_each_entry(val, list, head) {
+               struct vmw_resource *res = val->res;
+               struct vmw_dma_buffer *new_backup =
+                       backoff ? NULL : val->new_backup;
+
+               vmw_resource_unreserve(res, new_backup,
+                       val->new_backup_offset);
+               vmw_dmabuf_unreference(&val->new_backup);
+       }
+}
+
+
+/**
+ * vmw_resource_val_add - Add a resource to the software context's
+ * resource list if it's not already on it.
+ *
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ * @p_node: On successful return points to a valid pointer to a
+ * struct vmw_resource_val_node, if non-NULL on entry.
+ */
+static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
+                               struct vmw_resource *res,
+                               struct vmw_resource_val_node **p_node)
+{
+       struct vmw_resource_val_node *node;
+       struct drm_hash_item *hash;
+       int ret;
+
+       if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
+                                   &hash) == 0)) {
+               node = container_of(hash, struct vmw_resource_val_node, hash);
+               node->first_usage = false;
+               if (unlikely(p_node != NULL))
+                       *p_node = node;
+               return 0;
+       }
+
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (unlikely(node == NULL)) {
+               DRM_ERROR("Failed to allocate a resource validation "
+                         "entry.\n");
+               return -ENOMEM;
+       }
+
+       node->hash.key = (unsigned long) res;
+       ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed to initialize a resource validation "
+                         "entry.\n");
+               kfree(node);
+               return ret;
+       }
+       list_add_tail(&node->head, &sw_context->resource_list);
+       node->res = vmw_resource_reference(res);
+       node->first_usage = true;
+
+       if (unlikely(p_node != NULL))
+               *p_node = node;
+
+       return 0;
+}
+
+/**
+ * vmw_resource_relocation_add - Add a relocation to the relocation list
+ *
+ * @list: Pointer to head of relocation list.
+ * @res: The resource.
+ * @offset: Offset into the command buffer currently being parsed where the
+ * id that needs fixup is located. Granularity is 4 bytes.
+ */
+static int vmw_resource_relocation_add(struct list_head *list,
+                                      const struct vmw_resource *res,
+                                      unsigned long offset)
+{
+       struct vmw_resource_relocation *rel;
+
+       rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+       if (unlikely(rel == NULL)) {
+               DRM_ERROR("Failed to allocate a resource relocation.\n");
+               return -ENOMEM;
+       }
+
+       rel->res = res;
+       rel->offset = offset;
+       list_add_tail(&rel->head, list);
+
+       return 0;
+}
+
+/**
+ * vmw_resource_relocations_free - Free all relocations on a list
+ *
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_free(struct list_head *list)
+{
+       struct vmw_resource_relocation *rel, *n;
+
+       list_for_each_entry_safe(rel, n, list, head) {
+               list_del(&rel->head);
+               kfree(rel);
+       }
+}
+
+/**
+ * vmw_resource_relocations_apply - Apply all relocations on a list
+ *
+ * @cb: Pointer to the start of the command buffer being patched. This need
+ * not be the same buffer as the one being parsed when the relocation
+ * list was built, but the contents must be the same modulo the
+ * resource ids.
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_apply(uint32_t *cb,
+                                          struct list_head *list)
+{
+       struct vmw_resource_relocation *rel;
+
+       list_for_each_entry(rel, list, head)
+               cb[rel->offset] = rel->res->id;
+}
+
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
@@ -44,25 +219,11 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
        return 0;
 }
 
-static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
-                                         struct vmw_resource **p_res)
-{
-       struct vmw_resource *res = *p_res;
-
-       if (list_empty(&res->validate_head)) {
-               list_add_tail(&res->validate_head, &sw_context->resource_list);
-               *p_res = NULL;
-       } else
-               vmw_resource_unreference(p_res);
-}
-
 /**
  * vmw_bo_to_validate_list - add a bo to a validate list
  *
  * @sw_context: The software context used for this command submission batch.
  * @bo: The buffer object to add.
- * @fence_flags: Fence flags to be or'ed with any other fence flags for
- * this buffer on this submission batch.
  * @p_val_node: If non-NULL Will be updated with the validate node number
  * on return.
  *
@@ -74,21 +235,37 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   uint32_t *p_val_node)
 {
        uint32_t val_node;
+       struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
+       struct drm_hash_item *hash;
+       int ret;
 
-       val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-
-       if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
-               DRM_ERROR("Max number of DMA buffers per submission"
-                         " exceeded.\n");
-               return -EINVAL;
-       }
-
-       val_buf = &sw_context->val_bufs[val_node];
-       if (unlikely(val_node == sw_context->cur_val_buf)) {
+       if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+                                   &hash) == 0)) {
+               vval_buf = container_of(hash, struct vmw_validate_buffer,
+                                       hash);
+               val_buf = &vval_buf->base;
+               val_node = vval_buf - sw_context->val_bufs;
+       } else {
+               val_node = sw_context->cur_val_buf;
+               if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+                       DRM_ERROR("Max number of DMA buffers per submission "
+                                 "exceeded.\n");
+                       return -EINVAL;
+               }
+               vval_buf = &sw_context->val_bufs[val_node];
+               vval_buf->hash.key = (unsigned long) bo;
+               ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Failed to initialize a buffer validation "
+                                 "entry.\n");
+                       return ret;
+               }
+               ++sw_context->cur_val_buf;
+               val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(bo);
+               val_buf->reserved = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
-               ++sw_context->cur_val_buf;
        }
 
        sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
@@ -99,85 +276,174 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
        return 0;
 }
 
-static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
-                            struct vmw_sw_context *sw_context,
-                            SVGA3dCmdHeader *header)
+/**
+ * vmw_resources_reserve - Reserve all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Note that since vmware's command submission currently is protected by
+ * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
+ * since only a single thread at once will attempt this.
+ */
+static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
-       struct vmw_resource *ctx;
-
-       struct vmw_cid_cmd {
-               SVGA3dCmdHeader header;
-               __le32 cid;
-       } *cmd;
+       struct vmw_resource_val_node *val;
        int ret;
 
-       cmd = container_of(header, struct vmw_cid_cmd, header);
-       if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
-               return 0;
+       list_for_each_entry(val, &sw_context->resource_list, head) {
+               struct vmw_resource *res = val->res;
 
-       ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
-                               &ctx);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Could not find or use context %u\n",
-                         (unsigned) cmd->cid);
-               return ret;
+               ret = vmw_resource_reserve(res, val->no_buffer_needed);
+               if (unlikely(ret != 0))
+                       return ret;
+
+               if (res->backup) {
+                       struct ttm_buffer_object *bo = &res->backup->base;
+
+                       ret = vmw_bo_to_validate_list
+                               (sw_context, bo, NULL);
+
+                       if (unlikely(ret != 0))
+                               return ret;
+               }
        }
+       return 0;
+}
 
-       sw_context->last_cid = cmd->cid;
-       sw_context->cid_valid = true;
-       sw_context->cur_ctx = ctx;
-       vmw_resource_to_validate_list(sw_context, &ctx);
+/**
+ * vmw_resources_validate - Validate all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ */
+static int vmw_resources_validate(struct vmw_sw_context *sw_context)
+{
+       struct vmw_resource_val_node *val;
+       int ret;
+
+       list_for_each_entry(val, &sw_context->resource_list, head) {
+               struct vmw_resource *res = val->res;
 
+               ret = vmw_resource_validate(res);
+               if (unlikely(ret != 0)) {
+                       if (ret != -ERESTARTSYS)
+                               DRM_ERROR("Failed to validate resource.\n");
+                       return ret;
+               }
+       }
        return 0;
 }
 
-static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ */
+static int vmw_cmd_res_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
-                            uint32_t *sid)
+                            enum vmw_res_type res_type,
+                            const struct vmw_user_resource_conv *converter,
+                            uint32_t *id,
+                            struct vmw_resource_val_node **p_val)
 {
-       struct vmw_surface *srf;
-       int ret;
+       struct vmw_res_cache_entry *rcache =
+               &sw_context->res_cache[res_type];
        struct vmw_resource *res;
+       struct vmw_resource_val_node *node;
+       int ret;
 
-       if (*sid == SVGA3D_INVALID_ID)
+       if (*id == SVGA3D_INVALID_ID)
                return 0;
 
-       if (likely((sw_context->sid_valid  &&
-                     *sid == sw_context->last_sid))) {
-               *sid = sw_context->sid_translation;
-               return 0;
-       }
+       /*
+        * Fastpath in case of repeated commands referencing the same
+        * resource
+        */
 
-       ret = vmw_user_surface_lookup_handle(dev_priv,
-                                            sw_context->tfile,
-                                            *sid, &srf);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Could ot find or use surface 0x%08x "
-                         "address 0x%08lx\n",
-                         (unsigned int) *sid,
-                         (unsigned long) sid);
-               return ret;
+       if (likely(rcache->valid && *id == rcache->handle)) {
+               const struct vmw_resource *res = rcache->res;
+
+               rcache->node->first_usage = false;
+               if (p_val)
+                       *p_val = rcache->node;
+
+               return vmw_resource_relocation_add
+                       (&sw_context->res_relocations, res,
+                        id - sw_context->buf_start);
        }
 
-       ret = vmw_surface_validate(dev_priv, srf);
+       ret = vmw_user_resource_lookup_handle(dev_priv,
+                                             sw_context->tfile,
+                                             *id,
+                                             converter,
+                                             &res);
        if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Could not validate surface.\n");
-               vmw_surface_unreference(&srf);
+               DRM_ERROR("Could not find or use resource 0x%08x.\n",
+                         (unsigned) *id);
+               dump_stack();
                return ret;
        }
 
-       sw_context->last_sid = *sid;
-       sw_context->sid_valid = true;
-       sw_context->sid_translation = srf->res.id;
-       *sid = sw_context->sid_translation;
+       rcache->valid = true;
+       rcache->res = res;
+       rcache->handle = *id;
 
-       res = &srf->res;
-       vmw_resource_to_validate_list(sw_context, &res);
+       ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+                                         res,
+                                         id - sw_context->buf_start);
+       if (unlikely(ret != 0))
+               goto out_no_reloc;
+
+       ret = vmw_resource_val_add(sw_context, res, &node);
+       if (unlikely(ret != 0))
+               goto out_no_reloc;
 
+       rcache->node = node;
+       if (p_val)
+               *p_val = node;
+       vmw_resource_unreference(&res);
        return 0;
+
+out_no_reloc:
+       BUG_ON(sw_context->error_resource != NULL);
+       sw_context->error_resource = res;
+
+       return ret;
 }
 
+/**
+ * vmw_cmd_cid_check - Check a command header for valid context information.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @header: A command header with an embedded user-space context handle.
+ *
+ * Convenience function: Call vmw_cmd_res_check with the user-space context
+ * handle embedded in @header.
+ */
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+                            struct vmw_sw_context *sw_context,
+                            SVGA3dCmdHeader *header)
+{
+       struct vmw_cid_cmd {
+               SVGA3dCmdHeader header;
+               __le32 cid;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_cid_cmd, header);
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                                user_context_converter, &cmd->cid, NULL);
+}
 
 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
@@ -194,7 +460,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                return ret;
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
-       ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter,
+                               &cmd->body.target.sid, NULL);
        return ret;
 }
 
@@ -209,10 +477,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
        int ret;
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
-       ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter,
+                               &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
-       return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.dest.sid, NULL);
 }
 
 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -226,10 +498,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
        int ret;
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
-       ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter,
+                               &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
-       return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.dest.sid, NULL);
 }
 
 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -248,7 +524,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                return -EPERM;
        }
 
-       return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.srcImage.sid, NULL);
 }
 
 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -268,14 +546,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                return -EPERM;
        }
 
-       return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter, &cmd->body.sid,
+                                NULL);
 }
 
 /**
  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
  *
  * @dev_priv: The device private structure.
- * @cid: The hardware context for the next query.
  * @new_query_bo: The new buffer holding query results.
  * @sw_context: The software context used for this command submission.
  *
@@ -283,18 +562,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * query results, and if another buffer currently is pinned for query
  * results. If so, the function prepares the state of @sw_context for
  * switching pinned buffers after successful submission of the current
- * command batch. It also checks whether we're using a new query context.
- * In that case, it makes sure we emit a query barrier for the old
- * context before the current query buffer is fenced.
+ * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-                                      uint32_t cid,
                                       struct ttm_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
 {
+       struct vmw_res_cache_entry *ctx_entry =
+               &sw_context->res_cache[vmw_res_context];
        int ret;
-       bool add_cid = false;
-       uint32_t cid_to_add;
+
+       BUG_ON(!ctx_entry->valid);
+       sw_context->last_query_ctx = ctx_entry->res;
 
        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
@@ -304,9 +583,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                }
 
                if (unlikely(sw_context->cur_query_bo != NULL)) {
-                       BUG_ON(!sw_context->query_cid_valid);
-                       add_cid = true;
-                       cid_to_add = sw_context->cur_query_cid;
+                       sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      NULL);
@@ -323,27 +600,6 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
        }
 
-       if (unlikely(cid != sw_context->cur_query_cid &&
-                    sw_context->query_cid_valid)) {
-               add_cid = true;
-               cid_to_add = sw_context->cur_query_cid;
-       }
-
-       sw_context->cur_query_cid = cid;
-       sw_context->query_cid_valid = true;
-
-       if (add_cid) {
-               struct vmw_resource *ctx = sw_context->cur_ctx;
-
-               if (list_empty(&ctx->query_head))
-                       list_add_tail(&ctx->query_head,
-                                     &sw_context->query_list);
-               ret = vmw_bo_to_validate_list(sw_context,
-                                             dev_priv->dummy_query_bo,
-                                             NULL);
-               if (unlikely(ret != 0))
-                       return ret;
-       }
        return 0;
 }
 
@@ -355,10 +611,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
  * @sw_context: The software context used for this command submission batch.
  *
  * This function will check if we're switching query buffers, and will then,
- * if no other query waits are issued this command submission batch,
  * issue a dummy occlusion query wait used as a query barrier. When the fence
  * object following that query wait has signaled, we are sure that all
- * preseding queries have finished, and the old query buffer can be unpinned.
+ * preceding queries have finished, and the old query buffer can be unpinned.
  * However, since both the new query buffer and the old one are fenced with
  * that fence, we can do an asynchronus unpin now, and be sure that the
  * old query buffer won't be moved until the fence has signaled.
@@ -369,20 +624,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context)
 {
-
-       struct vmw_resource *ctx, *next_ctx;
-       int ret;
-
        /*
         * The validate list should still hold references to all
         * contexts here.
         */
 
-       list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
-                                query_head) {
-               list_del_init(&ctx->query_head);
+       if (sw_context->needs_post_query_barrier) {
+               struct vmw_res_cache_entry *ctx_entry =
+                       &sw_context->res_cache[vmw_res_context];
+               struct vmw_resource *ctx;
+               int ret;
 
-               BUG_ON(list_empty(&ctx->validate_head));
+               BUG_ON(!ctx_entry->valid);
+               ctx = ctx_entry->res;
 
                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
 
@@ -396,40 +650,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                        ttm_bo_unref(&dev_priv->pinned_bo);
                }
 
-               vmw_bo_pin(sw_context->cur_query_bo, true);
+               if (!sw_context->needs_post_query_barrier) {
+                       vmw_bo_pin(sw_context->cur_query_bo, true);
 
-               /*
-                * We pin also the dummy_query_bo buffer so that we
-                * don't need to validate it when emitting
-                * dummy queries in context destroy paths.
-                */
+                       /*
+                        * We pin also the dummy_query_bo buffer so that we
+                        * don't need to validate it when emitting
+                        * dummy queries in context destroy paths.
+                        */
 
-               vmw_bo_pin(dev_priv->dummy_query_bo, true);
-               dev_priv->dummy_query_bo_pinned = true;
+                       vmw_bo_pin(dev_priv->dummy_query_bo, true);
+                       dev_priv->dummy_query_bo_pinned = true;
 
-               dev_priv->query_cid = sw_context->cur_query_cid;
-               dev_priv->pinned_bo =
-                       ttm_bo_reference(sw_context->cur_query_bo);
+                       BUG_ON(sw_context->last_query_ctx == NULL);
+                       dev_priv->query_cid = sw_context->last_query_ctx->id;
+                       dev_priv->query_cid_valid = true;
+                       dev_priv->pinned_bo =
+                               ttm_bo_reference(sw_context->cur_query_bo);
+               }
        }
 }
 
 /**
- * vmw_query_switch_backoff - clear query barrier list
- * @sw_context: The sw context used for this submission batch.
+ * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr
  *
- * This function is used as part of an error path, where a previously
- * set up list of query barriers needs to be cleared.
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @ptr: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @id.
  *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr. The translation does not take place
+ * immediately, but during a call to vmw_apply_relocations().
+ * This function builds a relocation list and a list of buffers to validate.
+ * The former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations.
  */
-static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
-{
-       struct list_head *list, *next;
-
-       list_for_each_safe(list, next, &sw_context->query_list) {
-               list_del_init(list);
-       }
-}
-
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
@@ -471,6 +731,37 @@ out_no_reloc:
        return ret;
 }
 
+/**
+ * vmw_cmd_begin_query - validate a  SVGA_3D_CMD_BEGIN_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
+                              struct vmw_sw_context *sw_context,
+                              SVGA3dCmdHeader *header)
+{
+       struct vmw_begin_query_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBeginQuery q;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_begin_query_cmd,
+                          header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                                user_context_converter, &cmd->q.cid,
+                                NULL);
+}
+
+/**
+ * vmw_cmd_end_query - validate a  SVGA_3D_CMD_END_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
@@ -493,13 +784,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
-                                         &vmw_bo->base, sw_context);
+       ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
 
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
 }
 
+/*
+ * vmw_cmd_wait_query - validate a  SVGA_3D_CMD_WAIT_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
@@ -510,7 +807,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                SVGA3dCmdWaitForQuery q;
        } *cmd;
        int ret;
-       struct vmw_resource *ctx;
 
        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -524,16 +820,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                return ret;
 
        vmw_dmabuf_unreference(&vmw_bo);
-
-       /*
-        * This wait will act as a barrier for previous waits for this
-        * context.
-        */
-
-       ctx = sw_context->cur_ctx;
-       if (!list_empty(&ctx->query_head))
-               list_del_init(&ctx->query_head);
-
        return 0;
 }
 
@@ -542,14 +828,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       SVGA3dCmdHeader *header)
 {
        struct vmw_dma_buffer *vmw_bo = NULL;
-       struct ttm_buffer_object *bo;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;
-       struct vmw_resource *res;
 
        cmd = container_of(header, struct vmw_dma_cmd, header);
        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -558,37 +842,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       bo = &vmw_bo->base;
-       ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
-                                            cmd->dma.host.sid, &srf);
-       if (ret) {
-               DRM_ERROR("could not find surface\n");
-               goto out_no_reloc;
-       }
-
-       ret = vmw_surface_validate(dev_priv, srf);
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter, &cmd->dma.host.sid,
+                               NULL);
        if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Culd not validate surface.\n");
-               goto out_no_validate;
+               if (unlikely(ret != -ERESTARTSYS))
+                       DRM_ERROR("could not find surface for DMA.\n");
+               goto out_no_surface;
        }
 
-       /*
-        * Patch command stream with device SID.
-        */
-       cmd->dma.host.sid = srf->res.id;
-       vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
-
-       vmw_dmabuf_unreference(&vmw_bo);
-
-       res = &srf->res;
-       vmw_resource_to_validate_list(sw_context, &res);
+       srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
 
-       return 0;
+       vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
 
-out_no_validate:
-       vmw_surface_unreference(&srf);
-out_no_reloc:
+out_no_surface:
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
 }
@@ -621,8 +888,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
        }
 
        for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
-               ret = vmw_cmd_sid_check(dev_priv, sw_context,
-                                       &decl->array.surfaceId);
+               ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                       user_surface_converter,
+                                       &decl->array.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }
@@ -636,8 +904,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
 
        range = (SVGA3dPrimitiveRange *) decl;
        for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
-               ret = vmw_cmd_sid_check(dev_priv, sw_context,
-                                       &range->indexArray.surfaceId);
+               ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                       user_surface_converter,
+                                       &range->indexArray.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }
@@ -668,8 +937,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                        continue;
 
-               ret = vmw_cmd_sid_check(dev_priv, sw_context,
-                                       &cur_state->value);
+               ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                       user_surface_converter,
+                                       &cur_state->value, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }
@@ -700,6 +970,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
        return ret;
 }
 
+/**
+ * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+                             struct vmw_sw_context *sw_context,
+                             SVGA3dCmdHeader *header)
+{
+       struct vmw_set_shader_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdSetShader body;
+       } *cmd;
+       int ret;
+
+       cmd = container_of(header, struct vmw_set_shader_cmd,
+                          header);
+
+       ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+       if (unlikely(ret != 0))
+               return ret;
+
+       return 0;
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                void *buf, uint32_t *size)
@@ -773,16 +1071,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
+       VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
+       VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
-                   &vmw_cmd_blt_surf_screen_check)
+                   &vmw_cmd_blt_surf_screen_check),
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
+       VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
+       VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
+       VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -829,6 +1131,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
        int32_t cur_size = size;
        int ret;
 
+       sw_context->buf_start = buf;
+
        while (cur_size > 0) {
                size = cur_size;
                ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
@@ -860,43 +1164,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 
        for (i = 0; i < sw_context->cur_reloc; ++i) {
                reloc = &sw_context->relocs[i];
-               validate = &sw_context->val_bufs[reloc->index];
+               validate = &sw_context->val_bufs[reloc->index].base;
                bo = validate->bo;
-               if (bo->mem.mem_type == TTM_PL_VRAM) {
+               switch (bo->mem.mem_type) {
+               case TTM_PL_VRAM:
                        reloc->location->offset += bo->offset;
                        reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
-               } else
+                       break;
+               case VMW_PL_GMR:
                        reloc->location->gmrId = bo->mem.start;
+                       break;
+               default:
+                       BUG();
+               }
        }
        vmw_free_relocations(sw_context);
 }
 
+/**
+ * vmw_resource_list_unrefererence - Free up a resource list and unreference
+ * all resources referenced by it.
+ *
+ * @list: The resource list.
+ */
+static void vmw_resource_list_unreference(struct list_head *list)
+{
+       struct vmw_resource_val_node *val, *val_next;
+
+       /*
+        * Drop references to resources held during command submission.
+        */
+
+       list_for_each_entry_safe(val, val_next, list, head) {
+               list_del_init(&val->head);
+               vmw_resource_unreference(&val->res);
+               kfree(val);
+       }
+}
+
 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 {
-       struct ttm_validate_buffer *entry, *next;
-       struct vmw_resource *res, *res_next;
+       struct vmw_validate_buffer *entry, *next;
+       struct vmw_resource_val_node *val;
 
        /*
         * Drop references to DMA buffers held during command submission.
         */
        list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
-                                head) {
-               list_del(&entry->head);
-               vmw_dmabuf_validate_clear(entry->bo);
-               ttm_bo_unref(&entry->bo);
+                                base.head) {
+               list_del(&entry->base.head);
+               ttm_bo_unref(&entry->base.bo);
+               (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
                sw_context->cur_val_buf--;
        }
        BUG_ON(sw_context->cur_val_buf != 0);
 
-       /*
-        * Drop references to resources held during command submission.
-        */
-       vmw_resource_unreserve(&sw_context->resource_list);
-       list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
-                                validate_head) {
-               list_del_init(&res->validate_head);
-               vmw_resource_unreference(&res);
-       }
+       list_for_each_entry(val, &sw_context->resource_list, head)
+               (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
 }
 
 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -921,7 +1245,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         * used as a GMR, this will return -ENOMEM.
         */
 
-       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
+       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
        if (likely(ret == 0 || ret == -ERESTARTSYS))
                return ret;
 
@@ -931,7 +1255,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         */
 
        DRM_INFO("Falling through to VRAM.\n");
-       ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
+       ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
        return ret;
 }
 
@@ -939,11 +1263,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 static int vmw_validate_buffers(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context)
 {
-       struct ttm_validate_buffer *entry;
+       struct vmw_validate_buffer *entry;
        int ret;
 
-       list_for_each_entry(entry, &sw_context->validate_nodes, head) {
-               ret = vmw_validate_single_buffer(dev_priv, entry->bo);
+       list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
+               ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
                if (unlikely(ret != 0))
                        return ret;
        }
@@ -1106,6 +1430,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 {
        struct vmw_sw_context *sw_context = &dev_priv->ctx;
        struct vmw_fence_obj *fence = NULL;
+       struct vmw_resource *error_resource;
+       struct list_head resource_list;
        uint32_t handle;
        void *cmd;
        int ret;
@@ -1135,24 +1461,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                sw_context->kernel = true;
 
        sw_context->tfile = vmw_fpriv(file_priv)->tfile;
-       sw_context->cid_valid = false;
-       sw_context->sid_valid = false;
        sw_context->cur_reloc = 0;
        sw_context->cur_val_buf = 0;
        sw_context->fence_flags = 0;
-       INIT_LIST_HEAD(&sw_context->query_list);
        INIT_LIST_HEAD(&sw_context->resource_list);
        sw_context->cur_query_bo = dev_priv->pinned_bo;
-       sw_context->cur_query_cid = dev_priv->query_cid;
-       sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
-
+       sw_context->last_query_ctx = NULL;
+       sw_context->needs_post_query_barrier = false;
+       memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
        INIT_LIST_HEAD(&sw_context->validate_nodes);
+       INIT_LIST_HEAD(&sw_context->res_relocations);
+       if (!sw_context->res_ht_initialized) {
+               ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+               if (unlikely(ret != 0))
+                       goto out_unlock;
+               sw_context->res_ht_initialized = true;
+       }
 
+       INIT_LIST_HEAD(&resource_list);
        ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
                                command_size);
        if (unlikely(ret != 0))
                goto out_err;
 
+       ret = vmw_resources_reserve(sw_context);
+       if (unlikely(ret != 0))
+               goto out_err;
+
        ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
        if (unlikely(ret != 0))
                goto out_err;
@@ -1161,24 +1496,31 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        if (unlikely(ret != 0))
                goto out_err;
 
-       vmw_apply_relocations(sw_context);
+       ret = vmw_resources_validate(sw_context);
+       if (unlikely(ret != 0))
+               goto out_err;
 
        if (throttle_us) {
                ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
                                   throttle_us);
 
                if (unlikely(ret != 0))
-                       goto out_throttle;
+                       goto out_err;
        }
 
        cmd = vmw_fifo_reserve(dev_priv, command_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving fifo space for commands.\n");
                ret = -ENOMEM;
-               goto out_throttle;
+               goto out_err;
        }
 
+       vmw_apply_relocations(sw_context);
        memcpy(cmd, kernel_commands, command_size);
+
+       vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+       vmw_resource_relocations_free(&sw_context->res_relocations);
+
        vmw_fifo_commit(dev_priv, command_size);
 
        vmw_query_bo_switch_commit(dev_priv, sw_context);
@@ -1194,9 +1536,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        if (ret != 0)
                DRM_ERROR("Fence submission error. Syncing.\n");
 
+       vmw_resource_list_unreserve(&sw_context->resource_list, false);
        ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
                                    (void *) fence);
 
+       if (unlikely(dev_priv->pinned_bo != NULL &&
+                    !dev_priv->query_cid_valid))
+               __vmw_execbuf_release_pinned_bo(dev_priv, fence);
+
        vmw_clear_validations(sw_context);
        vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
                                    user_fence_rep, fence, handle);
@@ -1209,17 +1556,40 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                vmw_fence_obj_unreference(&fence);
        }
 
+       list_splice_init(&sw_context->resource_list, &resource_list);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+       /*
+        * Unreference resources outside of the cmdbuf_mutex to
+        * avoid deadlocks in resource destruction paths.
+        */
+       vmw_resource_list_unreference(&resource_list);
+
        return 0;
 
 out_err:
+       vmw_resource_relocations_free(&sw_context->res_relocations);
        vmw_free_relocations(sw_context);
-out_throttle:
-       vmw_query_switch_backoff(sw_context);
        ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+       vmw_resource_list_unreserve(&sw_context->resource_list, true);
        vmw_clear_validations(sw_context);
+       if (unlikely(dev_priv->pinned_bo != NULL &&
+                    !dev_priv->query_cid_valid))
+               __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 out_unlock:
+       list_splice_init(&sw_context->resource_list, &resource_list);
+       error_resource = sw_context->error_resource;
+       sw_context->error_resource = NULL;
        mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+       /*
+        * Unreference resources outside of the cmdbuf_mutex to
+        * avoid deadlocks in resource destruction paths.
+        */
+       vmw_resource_list_unreference(&resource_list);
+       if (unlikely(error_resource != NULL))
+               vmw_resource_unreference(&error_resource);
+
        return ret;
 }
 
@@ -1244,13 +1614,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
 
 
 /**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
  * query bo.
  *
  * @dev_priv: The device private structure.
- * @only_on_cid_match: Only flush and unpin if the current active query cid
- * matches @cid.
- * @cid: Optional context id to match.
+ * @fence: If non-NULL should point to a struct vmw_fence_obj issued
+ * _after_ a query barrier that flushes all queries touching the current
+ * buffer pointed to by @dev_priv->pinned_bo
  *
  * This function should be used to unpin the pinned query bo, or
  * as a query barrier when we need to make sure that all queries have
@@ -1263,23 +1633,21 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
  *
  * The function will synchronize on the previous query barrier, and will
  * thus not finish until that barrier has executed.
+ *
+ * the @dev_priv->cmdbuf_mutex needs to be held by the current thread
+ * before calling this function.
  */
-void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
-                                  bool only_on_cid_match, uint32_t cid)
+void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+                                    struct vmw_fence_obj *fence)
 {
        int ret = 0;
        struct list_head validate_list;
        struct ttm_validate_buffer pinned_val, query_val;
-       struct vmw_fence_obj *fence;
-
-       mutex_lock(&dev_priv->cmdbuf_mutex);
+       struct vmw_fence_obj *lfence = NULL;
 
        if (dev_priv->pinned_bo == NULL)
                goto out_unlock;
 
-       if (only_on_cid_match && cid != dev_priv->query_cid)
-               goto out_unlock;
-
        INIT_LIST_HEAD(&validate_list);
 
        pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
@@ -1297,25 +1665,34 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                goto out_no_reserve;
        }
 
-       ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
-       if (unlikely(ret != 0)) {
-               vmw_execbuf_unpin_panic(dev_priv);
-               goto out_no_emit;
+       if (dev_priv->query_cid_valid) {
+               BUG_ON(fence != NULL);
+               ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+               if (unlikely(ret != 0)) {
+                       vmw_execbuf_unpin_panic(dev_priv);
+                       goto out_no_emit;
+               }
+               dev_priv->query_cid_valid = false;
        }
 
        vmw_bo_pin(dev_priv->pinned_bo, false);
        vmw_bo_pin(dev_priv->dummy_query_bo, false);
        dev_priv->dummy_query_bo_pinned = false;
 
-       (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+       if (fence == NULL) {
+               (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
+                                                 NULL);
+               fence = lfence;
+       }
        ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+       if (lfence != NULL)
+               vmw_fence_obj_unreference(&lfence);
 
        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
        ttm_bo_unref(&dev_priv->pinned_bo);
 
 out_unlock:
-       mutex_unlock(&dev_priv->cmdbuf_mutex);
        return;
 
 out_no_emit:
@@ -1324,6 +1701,31 @@ out_no_reserve:
        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
        ttm_bo_unref(&dev_priv->pinned_bo);
+}
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
+{
+       mutex_lock(&dev_priv->cmdbuf_mutex);
+       if (dev_priv->query_cid_valid)
+               __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
 
index bc187fa..c62d20e 100644 (file)
@@ -537,7 +537,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
                container_of(fence, struct vmw_user_fence, fence);
        struct vmw_fence_manager *fman = fence->fman;
 
-       kfree(ufence);
+       ttm_base_object_kfree(ufence, base);
        /*
         * Free kernel space accounting.
         */
index b07ca2e..d9fbbe1 100644 (file)
@@ -110,6 +110,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
        memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
 
        ret = copy_to_user(buffer, bounce, size);
+       if (ret)
+               ret = -EFAULT;
        vfree(bounce);
 
        if (unlikely(ret != 0))
@@ -131,6 +133,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
        struct drm_vmw_rect *clips = NULL;
        struct drm_mode_object *obj;
        struct vmw_framebuffer *vfb;
+       struct vmw_resource *res;
        uint32_t num_clips;
        int ret;
 
@@ -178,11 +181,13 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
        if (unlikely(ret != 0))
                goto out_no_ttm_lock;
 
-       ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
-                                            &surface);
+       ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
+                                             user_surface_converter,
+                                             &res);
        if (ret)
                goto out_no_surface;
 
+       surface = vmw_res_to_srf(res);
        ret = vmw_kms_present(dev_priv, file_priv,
                              vfb, surface, arg->sid,
                              arg->dest_x, arg->dest_y,
index 070fb23..79f7e8e 100644 (file)
@@ -373,7 +373,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
        drm_mode_crtc_set_gamma_size(crtc, 256);
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.dirty_info_property,
                                      1);
 
index 292c988..e01a17b 100644 (file)
 #include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
-
-struct vmw_user_context {
-       struct ttm_base_object base;
-       struct vmw_resource res;
-};
-
-struct vmw_user_surface {
-       struct ttm_base_object base;
-       struct vmw_surface srf;
-       uint32_t size;
-};
+#include "vmwgfx_resource_priv.h"
 
 struct vmw_user_dma_buffer {
        struct ttm_base_object base;
@@ -62,17 +52,21 @@ struct vmw_user_stream {
        struct vmw_stream stream;
 };
 
-struct vmw_surface_offset {
-       uint32_t face;
-       uint32_t mip;
-       uint32_t bo_offset;
-};
-
 
-static uint64_t vmw_user_context_size;
-static uint64_t vmw_user_surface_size;
 static uint64_t vmw_user_stream_size;
 
+static const struct vmw_res_func vmw_stream_func = {
+       .res_type = vmw_res_stream,
+       .needs_backup = false,
+       .may_evict = false,
+       .type_name = "video streams",
+       .backup_placement = NULL,
+       .create = NULL,
+       .destroy = NULL,
+       .bind = NULL,
+       .unbind = NULL
+};
+
 static inline struct vmw_dma_buffer *
 vmw_dma_buffer(struct ttm_buffer_object *bo)
 {
@@ -100,13 +94,14 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  *
  * Release the resource id to the resource id manager and set it to -1
  */
-static void vmw_resource_release_id(struct vmw_resource *res)
+void vmw_resource_release_id(struct vmw_resource *res)
 {
        struct vmw_private *dev_priv = res->dev_priv;
+       struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
-               idr_remove(res->idr, res->id);
+               idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
 }
@@ -116,17 +111,33 @@ static void vmw_resource_release(struct kref *kref)
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
-       int id = res->id;
-       struct idr *idr = res->idr;
+       int id;
+       struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
        res->avail = false;
-       if (res->remove_from_lists != NULL)
-               res->remove_from_lists(res);
+       list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
+       if (res->backup) {
+               struct ttm_buffer_object *bo = &res->backup->base;
+
+               ttm_bo_reserve(bo, false, false, false, 0);
+               if (!list_empty(&res->mob_head) &&
+                   res->func->unbind != NULL) {
+                       struct ttm_validate_buffer val_buf;
+
+                       val_buf.bo = bo;
+                       res->func->unbind(res, false, &val_buf);
+               }
+               res->backup_dirty = false;
+               list_del_init(&res->mob_head);
+               ttm_bo_unreserve(bo);
+               vmw_dmabuf_unreference(&res->backup);
+       }
 
        if (likely(res->hw_destroy != NULL))
                res->hw_destroy(res);
 
+       id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
@@ -153,25 +164,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
 /**
  * vmw_resource_alloc_id - release a resource id to the id manager.
  *
- * @dev_priv: Pointer to the device private structure.
  * @res: Pointer to the resource.
  *
  * Allocate the lowest free resource from the resource manager, and set
  * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
  */
-static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
-                                struct vmw_resource *res)
+int vmw_resource_alloc_id(struct vmw_resource *res)
 {
+       struct vmw_private *dev_priv = res->dev_priv;
        int ret;
+       struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
        BUG_ON(res->id != -1);
 
        do {
-               if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
+               if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
                        return -ENOMEM;
 
                write_lock(&dev_priv->resource_lock);
-               ret = idr_get_new_above(res->idr, res, 1, &res->id);
+               ret = idr_get_new_above(idr, res, 1, &res->id);
                write_unlock(&dev_priv->resource_lock);
 
        } while (ret == -EAGAIN);
@@ -179,31 +190,39 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
        return ret;
 }
 
-
-static int vmw_resource_init(struct vmw_private *dev_priv,
-                            struct vmw_resource *res,
-                            struct idr *idr,
-                            enum ttm_object_type obj_type,
-                            bool delay_id,
-                            void (*res_free) (struct vmw_resource *res),
-                            void (*remove_from_lists)
-                            (struct vmw_resource *res))
+/**
+ * vmw_resource_init - initialize a struct vmw_resource
+ *
+ * @dev_priv:       Pointer to a device private struct.
+ * @res:            The struct vmw_resource to initialize.
+ * Returns 0 on success and a negative error code on failure.
+ * @delay_id:       Boolean whether to defer device id allocation until
+ *                  the first validation.
+ * @res_free:       Resource destructor.
+ * @func:           Resource function table.
+ */
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+                     bool delay_id,
+                     void (*res_free) (struct vmw_resource *res),
+                     const struct vmw_res_func *func)
 {
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
-       res->remove_from_lists = remove_from_lists;
-       res->res_type = obj_type;
-       res->idr = idr;
        res->avail = false;
        res->dev_priv = dev_priv;
-       INIT_LIST_HEAD(&res->query_head);
-       INIT_LIST_HEAD(&res->validate_head);
+       res->func = func;
+       INIT_LIST_HEAD(&res->lru_head);
+       INIT_LIST_HEAD(&res->mob_head);
        res->id = -1;
+       res->backup = NULL;
+       res->backup_offset = 0;
+       res->backup_dirty = false;
+       res->res_dirty = false;
        if (delay_id)
                return 0;
        else
-               return vmw_resource_alloc_id(dev_priv, res);
+               return vmw_resource_alloc_id(res);
 }
 
 /**
@@ -218,9 +237,8 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
  * Activate basically means that the function vmw_resource_lookup will
  * find it.
  */
-
-static void vmw_resource_activate(struct vmw_resource *res,
-                                 void (*hw_destroy) (struct vmw_resource *))
+void vmw_resource_activate(struct vmw_resource *res,
+                          void (*hw_destroy) (struct vmw_resource *))
 {
        struct vmw_private *dev_priv = res->dev_priv;
 
@@ -250,1392 +268,245 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
 }
 
 /**
- * Context management:
+ * vmw_user_resource_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle and perform basic type checks
+ *
+ * @dev_priv:     Pointer to a device private struct
+ * @tfile:        Pointer to a struct ttm_object_file identifying the caller
+ * @handle:       The TTM user-space handle
+ * @converter:    Pointer to an object describing the resource type
+ * @p_res:        On successful return the location pointed to will contain
+ *                a pointer to a refcounted struct vmw_resource.
+ *
+ * If the handle can't be found or is associated with an incorrect resource
+ * type, -EINVAL will be returned.
  */
-
-static void vmw_hw_context_destroy(struct vmw_resource *res)
-{
-
-       struct vmw_private *dev_priv = res->dev_priv;
-       struct {
-               SVGA3dCmdHeader header;
-               SVGA3dCmdDestroyContext body;
-       } *cmd;
-
-
-       vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
-
-       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-       if (unlikely(cmd == NULL)) {
-               DRM_ERROR("Failed reserving FIFO space for surface "
-                         "destruction.\n");
-               return;
-       }
-
-       cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
-       cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-       cmd->body.cid = cpu_to_le32(res->id);
-
-       vmw_fifo_commit(dev_priv, sizeof(*cmd));
-       vmw_3d_resource_dec(dev_priv, false);
-}
-
-static int vmw_context_init(struct vmw_private *dev_priv,
-                           struct vmw_resource *res,
-                           void (*res_free) (struct vmw_resource *res))
+int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
+                                   struct ttm_object_file *tfile,
+                                   uint32_t handle,
+                                   const struct vmw_user_resource_conv
+                                   *converter,
+                                   struct vmw_resource **p_res)
 {
-       int ret;
+       struct ttm_base_object *base;
+       struct vmw_resource *res;
+       int ret = -EINVAL;
 
-       struct {
-               SVGA3dCmdHeader header;
-               SVGA3dCmdDefineContext body;
-       } *cmd;
+       base = ttm_base_object_lookup(tfile, handle);
+       if (unlikely(base == NULL))
+               return -EINVAL;
 
-       ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
-                               VMW_RES_CONTEXT, false, res_free, NULL);
+       if (unlikely(base->object_type != converter->object_type))
+               goto out_bad_resource;
 
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed to allocate a resource id.\n");
-               goto out_early;
-       }
+       res = converter->base_obj_to_res(base);
 
-       if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
-               DRM_ERROR("Out of hw context ids.\n");
-               vmw_resource_unreference(&res);
-               return -ENOMEM;
+       read_lock(&dev_priv->resource_lock);
+       if (!res->avail || res->res_free != converter->res_free) {
+               read_unlock(&dev_priv->resource_lock);
+               goto out_bad_resource;
        }
 
-       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-       if (unlikely(cmd == NULL)) {
-               DRM_ERROR("Fifo reserve failed.\n");
-               vmw_resource_unreference(&res);
-               return -ENOMEM;
-       }
+       kref_get(&res->kref);
+       read_unlock(&dev_priv->resource_lock);
 
-       cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
-       cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-       cmd->body.cid = cpu_to_le32(res->id);
+       *p_res = res;
+       ret = 0;
 
-       vmw_fifo_commit(dev_priv, sizeof(*cmd));
-       (void) vmw_3d_resource_inc(dev_priv, false);
-       vmw_resource_activate(res, vmw_hw_context_destroy);
-       return 0;
+out_bad_resource:
+       ttm_base_object_unref(&base);
 
-out_early:
-       if (res_free == NULL)
-               kfree(res);
-       else
-               res_free(res);
        return ret;
 }
 
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed to by @out_surf and @out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+                          struct ttm_object_file *tfile,
+                          uint32_t handle,
+                          struct vmw_surface **out_surf,
+                          struct vmw_dma_buffer **out_buf)
 {
-       struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+       struct vmw_resource *res;
        int ret;
 
-       if (unlikely(res == NULL))
-               return NULL;
-
-       ret = vmw_context_init(dev_priv, res, NULL);
-       return (ret == 0) ? res : NULL;
-}
-
-/**
- * User-space context management:
- */
+       BUG_ON(*out_surf || *out_buf);
 
-static void vmw_user_context_free(struct vmw_resource *res)
-{
-       struct vmw_user_context *ctx =
-           container_of(res, struct vmw_user_context, res);
-       struct vmw_private *dev_priv = res->dev_priv;
+       ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
+                                             user_surface_converter,
+                                             &res);
+       if (!ret) {
+               *out_surf = vmw_res_to_srf(res);
+               return 0;
+       }
 
-       ttm_base_object_kfree(ctx, base);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                           vmw_user_context_size);
+       *out_surf = NULL;
+       ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+       return ret;
 }
 
 /**
- * This function is called when user space has no more references on the
- * base object. It releases the base-object's reference on the resource object.
+ * Buffer management.
  */
-
-static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
-       struct ttm_base_object *base = *p_base;
-       struct vmw_user_context *ctx =
-           container_of(base, struct vmw_user_context, base);
-       struct vmw_resource *res = &ctx->res;
+       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 
-       *p_base = NULL;
-       vmw_resource_unreference(&res);
+       kfree(vmw_bo);
 }
 
-int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
-                             struct drm_file *file_priv)
+int vmw_dmabuf_init(struct vmw_private *dev_priv,
+                   struct vmw_dma_buffer *vmw_bo,
+                   size_t size, struct ttm_placement *placement,
+                   bool interruptible,
+                   void (*bo_free) (struct ttm_buffer_object *bo))
 {
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       struct vmw_resource *res;
-       struct vmw_user_context *ctx;
-       struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       int ret = 0;
+       struct ttm_bo_device *bdev = &dev_priv->bdev;
+       size_t acc_size;
+       int ret;
 
-       res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
-       if (unlikely(res == NULL))
-               return -EINVAL;
+       BUG_ON(!bo_free);
 
-       if (res->res_free != &vmw_user_context_free) {
-               ret = -EINVAL;
-               goto out;
-       }
+       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+       memset(vmw_bo, 0, sizeof(*vmw_bo));
 
-       ctx = container_of(res, struct vmw_user_context, res);
-       if (ctx->base.tfile != tfile && !ctx->base.shareable) {
-               ret = -EPERM;
-               goto out;
-       }
+       INIT_LIST_HEAD(&vmw_bo->res_list);
 
-       ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
-out:
-       vmw_resource_unreference(&res);
+       ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+                         ttm_bo_type_device, placement,
+                         0, interruptible,
+                         NULL, acc_size, NULL, bo_free);
        return ret;
 }
 
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-                            struct drm_file *file_priv)
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 {
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       struct vmw_user_context *ctx;
-       struct vmw_resource *res;
-       struct vmw_resource *tmp;
-       struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct vmw_master *vmaster = vmw_master(file_priv->master);
-       int ret;
-
+       struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 
-       /*
-        * Approximate idr memory usage with 128 bytes. It will be limited
-        * by maximum number_of contexts anyway.
-        */
+       ttm_base_object_kfree(vmw_user_bo, base);
+}
 
-       if (unlikely(vmw_user_context_size == 0))
-               vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
+{
+       struct vmw_user_dma_buffer *vmw_user_bo;
+       struct ttm_base_object *base = *p_base;
+       struct ttm_buffer_object *bo;
 
-       ret = ttm_read_lock(&vmaster->lock, true);
-       if (unlikely(ret != 0))
-               return ret;
+       *p_base = NULL;
 
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                  vmw_user_context_size,
-                                  false, true);
-       if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for context"
-                                 " creation.\n");
-               goto out_unlock;
-       }
+       if (unlikely(base == NULL))
+               return;
 
-       ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
-       if (unlikely(ctx == NULL)) {
-               ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                                   vmw_user_context_size);
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
+       vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+       bo = &vmw_user_bo->dma.base;
+       ttm_bo_unref(&bo);
+}
 
-       res = &ctx->res;
-       ctx->base.shareable = false;
-       ctx->base.tfile = NULL;
+/**
+ * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the dma buffer.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * should be assigned.
+ */
+int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+                         struct ttm_object_file *tfile,
+                         uint32_t size,
+                         bool shareable,
+                         uint32_t *handle,
+                         struct vmw_dma_buffer **p_dma_buf)
+{
+       struct vmw_user_dma_buffer *user_bo;
+       struct ttm_buffer_object *tmp;
+       int ret;
 
-       /*
-        * From here on, the destructor takes over resource freeing.
-        */
+       user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+       if (unlikely(user_bo == NULL)) {
+               DRM_ERROR("Failed to allocate a buffer.\n");
+               return -ENOMEM;
+       }
 
-       ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+       ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+                             &vmw_vram_sys_placement, true,
+                             &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
-               goto out_unlock;
-
-       tmp = vmw_resource_reference(&ctx->res);
-       ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
-                                  &vmw_user_context_base_release, NULL);
+               return ret;
 
+       tmp = ttm_bo_reference(&user_bo->dma.base);
+       ret = ttm_base_object_init(tfile,
+                                  &user_bo->base,
+                                  shareable,
+                                  ttm_buffer_type,
+                                  &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0)) {
-               vmw_resource_unreference(&tmp);
-               goto out_err;
+               ttm_bo_unref(&tmp);
+               goto out_no_base_object;
        }
 
-       arg->cid = res->id;
-out_err:
-       vmw_resource_unreference(&res);
-out_unlock:
-       ttm_read_unlock(&vmaster->lock);
-       return ret;
-
-}
-
-int vmw_context_check(struct vmw_private *dev_priv,
-                     struct ttm_object_file *tfile,
-                     int id,
-                     struct vmw_resource **p_res)
-{
-       struct vmw_resource *res;
-       int ret = 0;
-
-       read_lock(&dev_priv->resource_lock);
-       res = idr_find(&dev_priv->context_idr, id);
-       if (res && res->avail) {
-               struct vmw_user_context *ctx =
-                       container_of(res, struct vmw_user_context, res);
-               if (ctx->base.tfile != tfile && !ctx->base.shareable)
-                       ret = -EPERM;
-               if (p_res)
-                       *p_res = vmw_resource_reference(res);
-       } else
-               ret = -EINVAL;
-       read_unlock(&dev_priv->resource_lock);
+       *p_dma_buf = &user_bo->dma;
+       *handle = user_bo->base.hash.key;
 
+out_no_base_object:
        return ret;
 }
 
-struct vmw_bpp {
-       uint8_t bpp;
-       uint8_t s_bpp;
-};
-
-/*
- * Size table for the supported SVGA3D surface formats. It consists of
- * two values. The bpp value and the s_bpp value which is short for
- * "stride bits per pixel" The values are given in such a way that the
- * minimum stride for the image is calculated using
- *
- * min_stride = w*s_bpp
- *
- * and the total memory requirement for the image is
- *
- * h*min_stride*bpp/s_bpp
- *
- */
-static const struct vmw_bpp vmw_sf_bpp[] = {
-       [SVGA3D_FORMAT_INVALID] = {0, 0},
-       [SVGA3D_X8R8G8B8] = {32, 32},
-       [SVGA3D_A8R8G8B8] = {32, 32},
-       [SVGA3D_R5G6B5] = {16, 16},
-       [SVGA3D_X1R5G5B5] = {16, 16},
-       [SVGA3D_A1R5G5B5] = {16, 16},
-       [SVGA3D_A4R4G4B4] = {16, 16},
-       [SVGA3D_Z_D32] = {32, 32},
-       [SVGA3D_Z_D16] = {16, 16},
-       [SVGA3D_Z_D24S8] = {32, 32},
-       [SVGA3D_Z_D15S1] = {16, 16},
-       [SVGA3D_LUMINANCE8] = {8, 8},
-       [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
-       [SVGA3D_LUMINANCE16] = {16, 16},
-       [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
-       [SVGA3D_DXT1] = {4, 16},
-       [SVGA3D_DXT2] = {8, 32},
-       [SVGA3D_DXT3] = {8, 32},
-       [SVGA3D_DXT4] = {8, 32},
-       [SVGA3D_DXT5] = {8, 32},
-       [SVGA3D_BUMPU8V8] = {16, 16},
-       [SVGA3D_BUMPL6V5U5] = {16, 16},
-       [SVGA3D_BUMPX8L8V8U8] = {32, 32},
-       [SVGA3D_ARGB_S10E5] = {16, 16},
-       [SVGA3D_ARGB_S23E8] = {32, 32},
-       [SVGA3D_A2R10G10B10] = {32, 32},
-       [SVGA3D_V8U8] = {16, 16},
-       [SVGA3D_Q8W8V8U8] = {32, 32},
-       [SVGA3D_CxV8U8] = {16, 16},
-       [SVGA3D_X8L8V8U8] = {32, 32},
-       [SVGA3D_A2W10V10U10] = {32, 32},
-       [SVGA3D_ALPHA8] = {8, 8},
-       [SVGA3D_R_S10E5] = {16, 16},
-       [SVGA3D_R_S23E8] = {32, 32},
-       [SVGA3D_RG_S10E5] = {16, 16},
-       [SVGA3D_RG_S23E8] = {32, 32},
-       [SVGA3D_BUFFER] = {8, 8},
-       [SVGA3D_Z_D24X8] = {32, 32},
-       [SVGA3D_V16U16] = {32, 32},
-       [SVGA3D_G16R16] = {32, 32},
-       [SVGA3D_A16B16G16R16] = {64,  64},
-       [SVGA3D_UYVY] = {12, 12},
-       [SVGA3D_YUY2] = {12, 12},
-       [SVGA3D_NV12] = {12, 8},
-       [SVGA3D_AYUV] = {32, 32},
-       [SVGA3D_BC4_UNORM] = {4,  16},
-       [SVGA3D_BC5_UNORM] = {8,  32},
-       [SVGA3D_Z_DF16] = {16,  16},
-       [SVGA3D_Z_DF24] = {24,  24},
-       [SVGA3D_Z_D24S8_INT] = {32,  32}
-};
-
-
-/**
- * Surface management.
- */
-
-struct vmw_surface_dma {
-       SVGA3dCmdHeader header;
-       SVGA3dCmdSurfaceDMA body;
-       SVGA3dCopyBox cb;
-       SVGA3dCmdSurfaceDMASuffix suffix;
-};
-
-struct vmw_surface_define {
-       SVGA3dCmdHeader header;
-       SVGA3dCmdDefineSurface body;
-};
-
-struct vmw_surface_destroy {
-       SVGA3dCmdHeader header;
-       SVGA3dCmdDestroySurface body;
-};
-
-
 /**
- * vmw_surface_dma_size - Compute fifo size for a dma command.
- *
- * @srf: Pointer to a struct vmw_surface
+ * vmw_user_dmabuf_verify_access - verify access permissions on this
+ * buffer object.
  *
- * Computes the required size for a surface dma command for backup or
- * restoration of the surface represented by @srf.
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
  */
-static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+                                 struct ttm_object_file *tfile)
 {
-       return srf->num_sizes * sizeof(struct vmw_surface_dma);
-}
+       struct vmw_user_dma_buffer *vmw_user_bo;
 
+       if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
+               return -EPERM;
 
-/**
- * vmw_surface_define_size - Compute fifo size for a surface define command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface define command for the definition
- * of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
-{
-       return sizeof(struct vmw_surface_define) + srf->num_sizes *
-               sizeof(SVGA3dSize);
+       vmw_user_bo = vmw_user_dma_buffer(bo);
+       return (vmw_user_bo->base.tfile == tfile ||
+       vmw_user_bo->base.shareable) ? 0 : -EPERM;
 }
 
-
-/**
- * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
- *
- * Computes the required size for a surface destroy command for the destruction
- * of a hw surface.
- */
-static inline uint32_t vmw_surface_destroy_size(void)
+int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv)
 {
-       return sizeof(struct vmw_surface_destroy);
-}
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       union drm_vmw_alloc_dmabuf_arg *arg =
+           (union drm_vmw_alloc_dmabuf_arg *)data;
+       struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+       struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+       struct vmw_dma_buffer *dma_buf;
+       uint32_t handle;
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
+       int ret;
 
-/**
- * vmw_surface_destroy_encode - Encode a surface_destroy command.
- *
- * @id: The surface id
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_destroy_encode(uint32_t id,
-                                      void *cmd_space)
-{
-       struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
-               cmd_space;
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (unlikely(ret != 0))
+               return ret;
 
-       cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
-       cmd->header.size = sizeof(cmd->body);
-       cmd->body.sid = id;
-}
-
-/**
- * vmw_surface_define_encode - Encode a surface_define command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_define_encode(const struct vmw_surface *srf,
-                                     void *cmd_space)
-{
-       struct vmw_surface_define *cmd = (struct vmw_surface_define *)
-               cmd_space;
-       struct drm_vmw_size *src_size;
-       SVGA3dSize *cmd_size;
-       uint32_t cmd_len;
-       int i;
-
-       cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
-
-       cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
-       cmd->header.size = cmd_len;
-       cmd->body.sid = srf->res.id;
-       cmd->body.surfaceFlags = srf->flags;
-       cmd->body.format = cpu_to_le32(srf->format);
-       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
-               cmd->body.face[i].numMipLevels = srf->mip_levels[i];
-
-       cmd += 1;
-       cmd_size = (SVGA3dSize *) cmd;
-       src_size = srf->sizes;
-
-       for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
-               cmd_size->width = src_size->width;
-               cmd_size->height = src_size->height;
-               cmd_size->depth = src_size->depth;
-       }
-}
-
-
-/**
- * vmw_surface_dma_encode - Encode a surface_dma command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
- * should be placed or read from.
- * @to_surface: Boolean whether to DMA to the surface or from the surface.
- */
-static void vmw_surface_dma_encode(struct vmw_surface *srf,
-                                  void *cmd_space,
-                                  const SVGAGuestPtr *ptr,
-                                  bool to_surface)
-{
-       uint32_t i;
-       uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
-       uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
-       struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
-
-       for (i = 0; i < srf->num_sizes; ++i) {
-               SVGA3dCmdHeader *header = &cmd->header;
-               SVGA3dCmdSurfaceDMA *body = &cmd->body;
-               SVGA3dCopyBox *cb = &cmd->cb;
-               SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
-               const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
-               const struct drm_vmw_size *cur_size = &srf->sizes[i];
-
-               header->id = SVGA_3D_CMD_SURFACE_DMA;
-               header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
-
-               body->guest.ptr = *ptr;
-               body->guest.ptr.offset += cur_offset->bo_offset;
-               body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
-               body->host.sid = srf->res.id;
-               body->host.face = cur_offset->face;
-               body->host.mipmap = cur_offset->mip;
-               body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
-                                 SVGA3D_READ_HOST_VRAM);
-               cb->x = 0;
-               cb->y = 0;
-               cb->z = 0;
-               cb->srcx = 0;
-               cb->srcy = 0;
-               cb->srcz = 0;
-               cb->w = cur_size->width;
-               cb->h = cur_size->height;
-               cb->d = cur_size->depth;
-
-               suffix->suffixSize = sizeof(*suffix);
-               suffix->maximumOffset = body->guest.pitch*cur_size->height*
-                       cur_size->depth*bpp / stride_bpp;
-               suffix->flags.discard = 0;
-               suffix->flags.unsynchronized = 0;
-               suffix->flags.reserved = 0;
-               ++cmd;
-       }
-};
-
-
-static void vmw_hw_surface_destroy(struct vmw_resource *res)
-{
-
-       struct vmw_private *dev_priv = res->dev_priv;
-       struct vmw_surface *srf;
-       void *cmd;
-
-       if (res->id != -1) {
-
-               cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
-               if (unlikely(cmd == NULL)) {
-                       DRM_ERROR("Failed reserving FIFO space for surface "
-                                 "destruction.\n");
-                       return;
-               }
-
-               vmw_surface_destroy_encode(res->id, cmd);
-               vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
-
-               /*
-                * used_memory_size_atomic, or separate lock
-                * to avoid taking dev_priv::cmdbuf_mutex in
-                * the destroy path.
-                */
-
-               mutex_lock(&dev_priv->cmdbuf_mutex);
-               srf = container_of(res, struct vmw_surface, res);
-               dev_priv->used_memory_size -= srf->backup_size;
-               mutex_unlock(&dev_priv->cmdbuf_mutex);
-
-       }
-       vmw_3d_resource_dec(dev_priv, false);
-}
-
-void vmw_surface_res_free(struct vmw_resource *res)
-{
-       struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
-       if (srf->backup)
-               ttm_bo_unref(&srf->backup);
-       kfree(srf->offsets);
-       kfree(srf->sizes);
-       kfree(srf->snooper.image);
-       kfree(srf);
-}
-
-
-/**
- * vmw_surface_do_validate - make a surface available to the device.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * If the surface doesn't have a hw id, allocate one, and optionally
- * DMA the backed up surface contents to the device.
- *
- * Returns -EBUSY if there wasn't sufficient device resources to
- * complete the validation. Retry after freeing up resources.
- *
- * May return other errors if the kernel is out of guest resources.
- */
-int vmw_surface_do_validate(struct vmw_private *dev_priv,
-                           struct vmw_surface *srf)
-{
-       struct vmw_resource *res = &srf->res;
-       struct list_head val_list;
-       struct ttm_validate_buffer val_buf;
-       uint32_t submit_size;
-       uint8_t *cmd;
-       int ret;
-
-       if (likely(res->id != -1))
-               return 0;
-
-       if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
-                    dev_priv->memory_size))
-               return -EBUSY;
-
-       /*
-        * Reserve- and validate the backup DMA bo.
-        */
-
-       if (srf->backup) {
-               INIT_LIST_HEAD(&val_list);
-               val_buf.bo = ttm_bo_reference(srf->backup);
-               list_add_tail(&val_buf.head, &val_list);
-               ret = ttm_eu_reserve_buffers(&val_list);
-               if (unlikely(ret != 0))
-                       goto out_no_reserve;
-
-               ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
-                                     true, false, false);
-               if (unlikely(ret != 0))
-                       goto out_no_validate;
-       }
-
-       /*
-        * Alloc id for the resource.
-        */
-
-       ret = vmw_resource_alloc_id(dev_priv, res);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed to allocate a surface id.\n");
-               goto out_no_id;
-       }
-       if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
-               ret = -EBUSY;
-               goto out_no_fifo;
-       }
-
-
-       /*
-        * Encode surface define- and dma commands.
-        */
-
-       submit_size = vmw_surface_define_size(srf);
-       if (srf->backup)
-               submit_size += vmw_surface_dma_size(srf);
-
-       cmd = vmw_fifo_reserve(dev_priv, submit_size);
-       if (unlikely(cmd == NULL)) {
-               DRM_ERROR("Failed reserving FIFO space for surface "
-                         "validation.\n");
-               ret = -ENOMEM;
-               goto out_no_fifo;
-       }
-
-       vmw_surface_define_encode(srf, cmd);
-       if (srf->backup) {
-               SVGAGuestPtr ptr;
-
-               cmd += vmw_surface_define_size(srf);
-               vmw_bo_get_guest_ptr(srf->backup, &ptr);
-               vmw_surface_dma_encode(srf, cmd, &ptr, true);
-       }
-
-       vmw_fifo_commit(dev_priv, submit_size);
-
-       /*
-        * Create a fence object and fence the backup buffer.
-        */
-
-       if (srf->backup) {
-               struct vmw_fence_obj *fence;
-
-               (void) vmw_execbuf_fence_commands(NULL, dev_priv,
-                                                 &fence, NULL);
-               ttm_eu_fence_buffer_objects(&val_list, fence);
-               if (likely(fence != NULL))
-                       vmw_fence_obj_unreference(&fence);
-               ttm_bo_unref(&val_buf.bo);
-               ttm_bo_unref(&srf->backup);
-       }
-
-       /*
-        * Surface memory usage accounting.
-        */
-
-       dev_priv->used_memory_size += srf->backup_size;
-
-       return 0;
-
-out_no_fifo:
-       vmw_resource_release_id(res);
-out_no_id:
-out_no_validate:
-       if (srf->backup)
-               ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
-       if (srf->backup)
-               ttm_bo_unref(&val_buf.bo);
-       return ret;
-}
-
-/**
- * vmw_surface_evict - Evict a hw surface.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface
- *
- * DMA the contents of a hw surface to a backup guest buffer object,
- * and destroy the hw surface, releasing its id.
- */
-int vmw_surface_evict(struct vmw_private *dev_priv,
-                     struct vmw_surface *srf)
-{
-       struct vmw_resource *res = &srf->res;
-       struct list_head val_list;
-       struct ttm_validate_buffer val_buf;
-       uint32_t submit_size;
-       uint8_t *cmd;
-       int ret;
-       struct vmw_fence_obj *fence;
-       SVGAGuestPtr ptr;
-
-       BUG_ON(res->id == -1);
-
-       /*
-        * Create a surface backup buffer object.
-        */
-
-       if (!srf->backup) {
-               ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
-                                   ttm_bo_type_device,
-                                   &vmw_srf_placement, 0, true,
-                                   NULL, &srf->backup);
-               if (unlikely(ret != 0))
-                       return ret;
-       }
-
-       /*
-        * Reserve- and validate the backup DMA bo.
-        */
-
-       INIT_LIST_HEAD(&val_list);
-       val_buf.bo = ttm_bo_reference(srf->backup);
-       list_add_tail(&val_buf.head, &val_list);
-       ret = ttm_eu_reserve_buffers(&val_list);
-       if (unlikely(ret != 0))
-               goto out_no_reserve;
-
-       ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
-                             true, false, false);
-       if (unlikely(ret != 0))
-               goto out_no_validate;
-
-
-       /*
-        * Encode the dma- and surface destroy commands.
-        */
-
-       submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
-       cmd = vmw_fifo_reserve(dev_priv, submit_size);
-       if (unlikely(cmd == NULL)) {
-               DRM_ERROR("Failed reserving FIFO space for surface "
-                         "eviction.\n");
-               ret = -ENOMEM;
-               goto out_no_fifo;
-       }
-
-       vmw_bo_get_guest_ptr(srf->backup, &ptr);
-       vmw_surface_dma_encode(srf, cmd, &ptr, false);
-       cmd += vmw_surface_dma_size(srf);
-       vmw_surface_destroy_encode(res->id, cmd);
-       vmw_fifo_commit(dev_priv, submit_size);
-
-       /*
-        * Surface memory usage accounting.
-        */
-
-       dev_priv->used_memory_size -= srf->backup_size;
-
-       /*
-        * Create a fence object and fence the DMA buffer.
-        */
-
-       (void) vmw_execbuf_fence_commands(NULL, dev_priv,
-                                         &fence, NULL);
-       ttm_eu_fence_buffer_objects(&val_list, fence);
-       if (likely(fence != NULL))
-               vmw_fence_obj_unreference(&fence);
-       ttm_bo_unref(&val_buf.bo);
-
-       /*
-        * Release the surface ID.
-        */
-
-       vmw_resource_release_id(res);
-
-       return 0;
-
-out_no_fifo:
-out_no_validate:
-       if (srf->backup)
-               ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
-       ttm_bo_unref(&val_buf.bo);
-       ttm_bo_unref(&srf->backup);
-       return ret;
-}
-
-
-/**
- * vmw_surface_validate - make a surface available to the device, evicting
- * other surfaces if needed.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * Try to validate a surface and if it fails due to limited device resources,
- * repeatedly try to evict other surfaces until the request can be
- * acommodated.
- *
- * May return errors if out of resources.
- */
-int vmw_surface_validate(struct vmw_private *dev_priv,
-                        struct vmw_surface *srf)
-{
-       int ret;
-       struct vmw_surface *evict_srf;
-
-       do {
-               write_lock(&dev_priv->resource_lock);
-               list_del_init(&srf->lru_head);
-               write_unlock(&dev_priv->resource_lock);
-
-               ret = vmw_surface_do_validate(dev_priv, srf);
-               if (likely(ret != -EBUSY))
-                       break;
-
-               write_lock(&dev_priv->resource_lock);
-               if (list_empty(&dev_priv->surface_lru)) {
-                       DRM_ERROR("Out of device memory for surfaces.\n");
-                       ret = -EBUSY;
-                       write_unlock(&dev_priv->resource_lock);
-                       break;
-               }
-
-               evict_srf = vmw_surface_reference
-                       (list_first_entry(&dev_priv->surface_lru,
-                                         struct vmw_surface,
-                                         lru_head));
-               list_del_init(&evict_srf->lru_head);
-
-               write_unlock(&dev_priv->resource_lock);
-               (void) vmw_surface_evict(dev_priv, evict_srf);
-
-               vmw_surface_unreference(&evict_srf);
-
-       } while (1);
-
-       if (unlikely(ret != 0 && srf->res.id != -1)) {
-               write_lock(&dev_priv->resource_lock);
-               list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
-               write_unlock(&dev_priv->resource_lock);
-       }
-
-       return ret;
-}
-
-
-/**
- * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
- *
- * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
- *
- * As part of the resource destruction, remove the surface from any
- * lookup lists.
- */
-static void vmw_surface_remove_from_lists(struct vmw_resource *res)
-{
-       struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
-       list_del_init(&srf->lru_head);
-}
-
-int vmw_surface_init(struct vmw_private *dev_priv,
-                    struct vmw_surface *srf,
-                    void (*res_free) (struct vmw_resource *res))
-{
-       int ret;
-       struct vmw_resource *res = &srf->res;
-
-       BUG_ON(res_free == NULL);
-       INIT_LIST_HEAD(&srf->lru_head);
-       ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
-                               VMW_RES_SURFACE, true, res_free,
-                               vmw_surface_remove_from_lists);
-
-       if (unlikely(ret != 0))
-               res_free(res);
-
-       /*
-        * The surface won't be visible to hardware until a
-        * surface validate.
-        */
-
-       (void) vmw_3d_resource_inc(dev_priv, false);
-       vmw_resource_activate(res, vmw_hw_surface_destroy);
-       return ret;
-}
-
-static void vmw_user_surface_free(struct vmw_resource *res)
-{
-       struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-       struct vmw_user_surface *user_srf =
-           container_of(srf, struct vmw_user_surface, srf);
-       struct vmw_private *dev_priv = srf->res.dev_priv;
-       uint32_t size = user_srf->size;
-
-       if (srf->backup)
-               ttm_bo_unref(&srf->backup);
-       kfree(srf->offsets);
-       kfree(srf->sizes);
-       kfree(srf->snooper.image);
-       ttm_base_object_kfree(user_srf, base);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_resource_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @list_head: list of resources to unreserve.
- *
- * Currently only surfaces are considered, and unreserving a surface
- * means putting it back on the device's surface lru list,
- * so that it can be evicted if necessary.
- * This function traverses the resource list and
- * checks whether resources are surfaces, and in that case puts them back
- * on the device's surface LRU list.
- */
-void vmw_resource_unreserve(struct list_head *list)
-{
-       struct vmw_resource *res;
-       struct vmw_surface *srf;
-       rwlock_t *lock = NULL;
-
-       list_for_each_entry(res, list, validate_head) {
-
-               if (res->res_free != &vmw_surface_res_free &&
-                   res->res_free != &vmw_user_surface_free)
-                       continue;
-
-               if (unlikely(lock == NULL)) {
-                       lock = &res->dev_priv->resource_lock;
-                       write_lock(lock);
-               }
-
-               srf = container_of(res, struct vmw_surface, res);
-               list_del_init(&srf->lru_head);
-               list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
-       }
-
-       if (lock != NULL)
-               write_unlock(lock);
-}
-
-/**
- * Helper function that looks either a surface or dmabuf.
- *
- * The pointer this pointed at by out_surf and out_buf needs to be null.
- */
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
-                          struct ttm_object_file *tfile,
-                          uint32_t handle,
-                          struct vmw_surface **out_surf,
-                          struct vmw_dma_buffer **out_buf)
-{
-       int ret;
-
-       BUG_ON(*out_surf || *out_buf);
-
-       ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
-       if (!ret)
-               return 0;
-
-       ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
-       return ret;
-}
-
-
-int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
-                                  struct ttm_object_file *tfile,
-                                  uint32_t handle, struct vmw_surface **out)
-{
-       struct vmw_resource *res;
-       struct vmw_surface *srf;
-       struct vmw_user_surface *user_srf;
-       struct ttm_base_object *base;
-       int ret = -EINVAL;
-
-       base = ttm_base_object_lookup(tfile, handle);
-       if (unlikely(base == NULL))
-               return -EINVAL;
-
-       if (unlikely(base->object_type != VMW_RES_SURFACE))
-               goto out_bad_resource;
-
-       user_srf = container_of(base, struct vmw_user_surface, base);
-       srf = &user_srf->srf;
-       res = &srf->res;
-
-       read_lock(&dev_priv->resource_lock);
-
-       if (!res->avail || res->res_free != &vmw_user_surface_free) {
-               read_unlock(&dev_priv->resource_lock);
-               goto out_bad_resource;
-       }
-
-       kref_get(&res->kref);
-       read_unlock(&dev_priv->resource_lock);
-
-       *out = srf;
-       ret = 0;
-
-out_bad_resource:
-       ttm_base_object_unref(&base);
-
-       return ret;
-}
-
-static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
-{
-       struct ttm_base_object *base = *p_base;
-       struct vmw_user_surface *user_srf =
-           container_of(base, struct vmw_user_surface, base);
-       struct vmw_resource *res = &user_srf->srf.res;
-
-       *p_base = NULL;
-       vmw_resource_unreference(&res);
-}
-
-int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
-                             struct drm_file *file_priv)
-{
-       struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
-       return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
-}
-
-int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
-                            struct drm_file *file_priv)
-{
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       struct vmw_user_surface *user_srf;
-       struct vmw_surface *srf;
-       struct vmw_resource *res;
-       struct vmw_resource *tmp;
-       union drm_vmw_surface_create_arg *arg =
-           (union drm_vmw_surface_create_arg *)data;
-       struct drm_vmw_surface_create_req *req = &arg->req;
-       struct drm_vmw_surface_arg *rep = &arg->rep;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct drm_vmw_size __user *user_sizes;
-       int ret;
-       int i, j;
-       uint32_t cur_bo_offset;
-       struct drm_vmw_size *cur_size;
-       struct vmw_surface_offset *cur_offset;
-       uint32_t stride_bpp;
-       uint32_t bpp;
-       uint32_t num_sizes;
-       uint32_t size;
-       struct vmw_master *vmaster = vmw_master(file_priv->master);
-
-       if (unlikely(vmw_user_surface_size == 0))
-               vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-                       128;
-
-       num_sizes = 0;
-       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
-               num_sizes += req->mip_levels[i];
-
-       if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
-           DRM_VMW_MAX_MIP_LEVELS)
-               return -EINVAL;
-
-       size = vmw_user_surface_size + 128 +
-               ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
-               ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
-
-       ret = ttm_read_lock(&vmaster->lock, true);
-       if (unlikely(ret != 0))
-               return ret;
-
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                  size, false, true);
-       if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for surface"
-                                 " creation.\n");
-               goto out_unlock;
-       }
-
-       user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
-       if (unlikely(user_srf == NULL)) {
-               ret = -ENOMEM;
-               goto out_no_user_srf;
-       }
-
-       srf = &user_srf->srf;
-       res = &srf->res;
-
-       srf->flags = req->flags;
-       srf->format = req->format;
-       srf->scanout = req->scanout;
-       srf->backup = NULL;
-
-       memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
-       srf->num_sizes = num_sizes;
-       user_srf->size = size;
-
-       srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
-       if (unlikely(srf->sizes == NULL)) {
-               ret = -ENOMEM;
-               goto out_no_sizes;
-       }
-       srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
-                              GFP_KERNEL);
-       if (unlikely(srf->sizes == NULL)) {
-               ret = -ENOMEM;
-               goto out_no_offsets;
-       }
-
-       user_sizes = (struct drm_vmw_size __user *)(unsigned long)
-           req->size_addr;
-
-       ret = copy_from_user(srf->sizes, user_sizes,
-                            srf->num_sizes * sizeof(*srf->sizes));
-       if (unlikely(ret != 0)) {
-               ret = -EFAULT;
-               goto out_no_copy;
-       }
-
-       cur_bo_offset = 0;
-       cur_offset = srf->offsets;
-       cur_size = srf->sizes;
-
-       bpp = vmw_sf_bpp[srf->format].bpp;
-       stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
-
-       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
-               for (j = 0; j < srf->mip_levels[i]; ++j) {
-                       uint32_t stride =
-                               (cur_size->width * stride_bpp + 7) >> 3;
-
-                       cur_offset->face = i;
-                       cur_offset->mip = j;
-                       cur_offset->bo_offset = cur_bo_offset;
-                       cur_bo_offset += stride * cur_size->height *
-                               cur_size->depth * bpp / stride_bpp;
-                       ++cur_offset;
-                       ++cur_size;
-               }
-       }
-       srf->backup_size = cur_bo_offset;
-
-       if (srf->scanout &&
-           srf->num_sizes == 1 &&
-           srf->sizes[0].width == 64 &&
-           srf->sizes[0].height == 64 &&
-           srf->format == SVGA3D_A8R8G8B8) {
-
-               /* allocate image area and clear it */
-               srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
-               if (!srf->snooper.image) {
-                       DRM_ERROR("Failed to allocate cursor_image\n");
-                       ret = -ENOMEM;
-                       goto out_no_copy;
-               }
-       } else {
-               srf->snooper.image = NULL;
-       }
-       srf->snooper.crtc = NULL;
-
-       user_srf->base.shareable = false;
-       user_srf->base.tfile = NULL;
-
-       /**
-        * From this point, the generic resource management functions
-        * destroy the object on failure.
-        */
-
-       ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
-       if (unlikely(ret != 0))
-               goto out_unlock;
-
-       tmp = vmw_resource_reference(&srf->res);
-       ret = ttm_base_object_init(tfile, &user_srf->base,
-                                  req->shareable, VMW_RES_SURFACE,
-                                  &vmw_user_surface_base_release, NULL);
-
-       if (unlikely(ret != 0)) {
-               vmw_resource_unreference(&tmp);
-               vmw_resource_unreference(&res);
-               goto out_unlock;
-       }
-
-       rep->sid = user_srf->base.hash.key;
-       if (rep->sid == SVGA3D_INVALID_ID)
-               DRM_ERROR("Created bad Surface ID.\n");
-
-       vmw_resource_unreference(&res);
-
-       ttm_read_unlock(&vmaster->lock);
-       return 0;
-out_no_copy:
-       kfree(srf->offsets);
-out_no_offsets:
-       kfree(srf->sizes);
-out_no_sizes:
-       kfree(user_srf);
-out_no_user_srf:
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-out_unlock:
-       ttm_read_unlock(&vmaster->lock);
-       return ret;
-}
-
-int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
-                               struct drm_file *file_priv)
-{
-       union drm_vmw_surface_reference_arg *arg =
-           (union drm_vmw_surface_reference_arg *)data;
-       struct drm_vmw_surface_arg *req = &arg->req;
-       struct drm_vmw_surface_create_req *rep = &arg->rep;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct vmw_surface *srf;
-       struct vmw_user_surface *user_srf;
-       struct drm_vmw_size __user *user_sizes;
-       struct ttm_base_object *base;
-       int ret = -EINVAL;
-
-       base = ttm_base_object_lookup(tfile, req->sid);
-       if (unlikely(base == NULL)) {
-               DRM_ERROR("Could not find surface to reference.\n");
-               return -EINVAL;
-       }
-
-       if (unlikely(base->object_type != VMW_RES_SURFACE))
-               goto out_bad_resource;
-
-       user_srf = container_of(base, struct vmw_user_surface, base);
-       srf = &user_srf->srf;
-
-       ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Could not add a reference to a surface.\n");
-               goto out_no_reference;
-       }
-
-       rep->flags = srf->flags;
-       rep->format = srf->format;
-       memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
-       user_sizes = (struct drm_vmw_size __user *)(unsigned long)
-           rep->size_addr;
-
-       if (user_sizes)
-               ret = copy_to_user(user_sizes, srf->sizes,
-                                  srf->num_sizes * sizeof(*srf->sizes));
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("copy_to_user failed %p %u\n",
-                         user_sizes, srf->num_sizes);
-               ret = -EFAULT;
-       }
-out_bad_resource:
-out_no_reference:
-       ttm_base_object_unref(&base);
-
-       return ret;
-}
-
-int vmw_surface_check(struct vmw_private *dev_priv,
-                     struct ttm_object_file *tfile,
-                     uint32_t handle, int *id)
-{
-       struct ttm_base_object *base;
-       struct vmw_user_surface *user_srf;
-
-       int ret = -EPERM;
-
-       base = ttm_base_object_lookup(tfile, handle);
-       if (unlikely(base == NULL))
-               return -EINVAL;
-
-       if (unlikely(base->object_type != VMW_RES_SURFACE))
-               goto out_bad_surface;
-
-       user_srf = container_of(base, struct vmw_user_surface, base);
-       *id = user_srf->srf.res.id;
-       ret = 0;
-
-out_bad_surface:
-       /**
-        * FIXME: May deadlock here when called from the
-        * command parsing code.
-        */
-
-       ttm_base_object_unref(&base);
-       return ret;
-}
-
-/**
- * Buffer management.
- */
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
-{
-       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
-       kfree(vmw_bo);
-}
-
-int vmw_dmabuf_init(struct vmw_private *dev_priv,
-                   struct vmw_dma_buffer *vmw_bo,
-                   size_t size, struct ttm_placement *placement,
-                   bool interruptible,
-                   void (*bo_free) (struct ttm_buffer_object *bo))
-{
-       struct ttm_bo_device *bdev = &dev_priv->bdev;
-       size_t acc_size;
-       int ret;
-
-       BUG_ON(!bo_free);
-
-       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
-       memset(vmw_bo, 0, sizeof(*vmw_bo));
-
-       INIT_LIST_HEAD(&vmw_bo->validate_list);
-
-       ret = ttm_bo_init(bdev, &vmw_bo->base, size,
-                         ttm_bo_type_device, placement,
-                         0, interruptible,
-                         NULL, acc_size, NULL, bo_free);
-       return ret;
-}
-
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
-       struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
-       ttm_base_object_kfree(vmw_user_bo, base);
-}
-
-static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
-{
-       struct vmw_user_dma_buffer *vmw_user_bo;
-       struct ttm_base_object *base = *p_base;
-       struct ttm_buffer_object *bo;
-
-       *p_base = NULL;
-
-       if (unlikely(base == NULL))
-               return;
-
-       vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
-       bo = &vmw_user_bo->dma.base;
-       ttm_bo_unref(&bo);
-}
-
-int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv)
-{
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       union drm_vmw_alloc_dmabuf_arg *arg =
-           (union drm_vmw_alloc_dmabuf_arg *)data;
-       struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
-       struct drm_vmw_dmabuf_rep *rep = &arg->rep;
-       struct vmw_user_dma_buffer *vmw_user_bo;
-       struct ttm_buffer_object *tmp;
-       struct vmw_master *vmaster = vmw_master(file_priv->master);
-       int ret;
-
-       vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
-       if (unlikely(vmw_user_bo == NULL))
-               return -ENOMEM;
-
-       ret = ttm_read_lock(&vmaster->lock, true);
-       if (unlikely(ret != 0)) {
-               kfree(vmw_user_bo);
-               return ret;
-       }
-
-       ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
-                             &vmw_vram_sys_placement, true,
-                             &vmw_user_dmabuf_destroy);
-       if (unlikely(ret != 0))
-               goto out_no_dmabuf;
-
-       tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
-       ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
-                                  &vmw_user_bo->base,
-                                  false,
-                                  ttm_buffer_type,
-                                  &vmw_user_dmabuf_release, NULL);
-       if (unlikely(ret != 0))
-               goto out_no_base_object;
-       else {
-               rep->handle = vmw_user_bo->base.hash.key;
-               rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
-               rep->cur_gmr_id = vmw_user_bo->base.hash.key;
-               rep->cur_gmr_offset = 0;
-       }
+       ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+                                   req->size, false, &handle, &dma_buf);
+       if (unlikely(ret != 0))
+               goto out_no_dmabuf;
+
+       rep->handle = handle;
+       rep->map_handle = dma_buf->base.addr_space_offset;
+       rep->cur_gmr_id = handle;
+       rep->cur_gmr_offset = 0;
+
+       vmw_dmabuf_unreference(&dma_buf);
 
-out_no_base_object:
-       ttm_bo_unref(&tmp);
 out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);
 
@@ -1653,27 +524,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                                         TTM_REF_USAGE);
 }
 
-uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
-                                 uint32_t cur_validate_node)
-{
-       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
-       if (likely(vmw_bo->on_validate_list))
-               return vmw_bo->cur_validate_node;
-
-       vmw_bo->cur_validate_node = cur_validate_node;
-       vmw_bo->on_validate_list = true;
-
-       return cur_validate_node;
-}
-
-void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
-{
-       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
-       vmw_bo->on_validate_list = false;
-}
-
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
 {
@@ -1702,6 +552,18 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
        return 0;
 }
 
+int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+                             struct vmw_dma_buffer *dma_buf)
+{
+       struct vmw_user_dma_buffer *user_bo;
+
+       if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+               return -EINVAL;
+
+       user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+       return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+}
+
 /*
  * Stream management
  */
@@ -1726,8 +588,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
        struct vmw_resource *res = &stream->res;
        int ret;
 
-       ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
-                               VMW_RES_STREAM, false, res_free, NULL);
+       ret = vmw_resource_init(dev_priv, res, false, res_free,
+                               &vmw_stream_func);
 
        if (unlikely(ret != 0)) {
                if (res_free == NULL)
@@ -1749,10 +611,6 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
        return 0;
 }
 
-/**
- * User-space context management:
- */
-
 static void vmw_user_stream_free(struct vmw_resource *res)
 {
        struct vmw_user_stream *stream =
@@ -1788,9 +646,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
        int ret = 0;
 
-       res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
+
+       res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;
 
@@ -1891,7 +751,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv,
        struct vmw_resource *res;
        int ret;
 
-       res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
+       res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
+                                 *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;
 
@@ -1986,3 +847,453 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
 }
+
+/**
+ * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ *
+ * @res:            The resource for which to allocate a backup buffer.
+ * @interruptible:  Whether any sleeps during allocation should be
+ *                  performed while interruptible.
+ */
+static int vmw_resource_buf_alloc(struct vmw_resource *res,
+                                 bool interruptible)
+{
+       unsigned long size =
+               (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+       struct vmw_dma_buffer *backup;
+       int ret;
+
+       if (likely(res->backup)) {
+               BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+               return 0;
+       }
+
+       backup = kzalloc(sizeof(*backup), GFP_KERNEL);
+       if (unlikely(backup == NULL))
+               return -ENOMEM;
+
+       ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+                             res->func->backup_placement,
+                             interruptible,
+                             &vmw_dmabuf_bo_free);
+       if (unlikely(ret != 0))
+               goto out_no_dmabuf;
+
+       res->backup = backup;
+
+out_no_dmabuf:
+       return ret;
+}
+
+/**
+ * vmw_resource_do_validate - Make a resource up-to-date and visible
+ *                            to the device.
+ *
+ * @res:            The resource to make visible to the device.
+ * @val_buf:        Information about a buffer possibly
+ *                  containing backup data if a bind operation is needed.
+ *
+ * On hardware resource shortage, this function returns -EBUSY and
+ * should be retried once resources have been freed up.
+ */
+static int vmw_resource_do_validate(struct vmw_resource *res,
+                                   struct ttm_validate_buffer *val_buf)
+{
+       int ret = 0;
+       const struct vmw_res_func *func = res->func;
+
+       if (unlikely(res->id == -1)) {
+               ret = func->create(res);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+
+       if (func->bind &&
+           ((func->needs_backup && list_empty(&res->mob_head) &&
+             val_buf->bo != NULL) ||
+            (!func->needs_backup && val_buf->bo != NULL))) {
+               ret = func->bind(res, val_buf);
+               if (unlikely(ret != 0))
+                       goto out_bind_failed;
+               if (func->needs_backup)
+                       list_add_tail(&res->mob_head, &res->backup->res_list);
+       }
+
+       /*
+        * Only do this on write operations, and move to
+        * vmw_resource_unreserve if it can be called after
+        * backup buffers have been unreserved. Otherwise
+        * sort out locking.
+        */
+       res->res_dirty = true;
+
+       return 0;
+
+out_bind_failed:
+       func->destroy(res);
+
+       return ret;
+}
+
+/**
+ * vmw_resource_unreserve - Unreserve a resource previously reserved for
+ * command submission.
+ *
+ * @res:               Pointer to the struct vmw_resource to unreserve.
+ * @new_backup:        Pointer to new backup buffer if command submission
+ *                     switched.
+ * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *
+ * Currently unreserving a resource means putting it back on the device's
+ * resource lru list, so that it can be evicted if necessary.
+ */
+void vmw_resource_unreserve(struct vmw_resource *res,
+                           struct vmw_dma_buffer *new_backup,
+                           unsigned long new_backup_offset)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       if (!list_empty(&res->lru_head))
+               return;
+
+       if (new_backup && new_backup != res->backup) {
+
+               if (res->backup) {
+                       BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+                       list_del_init(&res->mob_head);
+                       vmw_dmabuf_unreference(&res->backup);
+               }
+
+               res->backup = vmw_dmabuf_reference(new_backup);
+               BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+               list_add_tail(&res->mob_head, &new_backup->res_list);
+       }
+       if (new_backup)
+               res->backup_offset = new_backup_offset;
+
+       if (!res->func->may_evict)
+               return;
+
+       write_lock(&dev_priv->resource_lock);
+       list_add_tail(&res->lru_head,
+                     &res->dev_priv->res_lru[res->func->res_type]);
+       write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_check_buffer - Check whether a backup buffer is needed
+ *                             for a resource and in that case, allocate
+ *                             one, reserve and validate it.
+ *
+ * @res:            The resource for which to allocate a backup buffer.
+ * @interruptible:  Whether any sleeps during allocation should be
+ *                  performed while interruptible.
+ * @val_buf:        On successful return contains data about the
+ *                  reserved and validated backup buffer.
+ */
+int vmw_resource_check_buffer(struct vmw_resource *res,
+                             bool interruptible,
+                             struct ttm_validate_buffer *val_buf)
+{
+       struct list_head val_list;
+       bool backup_dirty = false;
+       int ret;
+
+       if (unlikely(res->backup == NULL)) {
+               ret = vmw_resource_buf_alloc(res, interruptible);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+
+       INIT_LIST_HEAD(&val_list);
+       val_buf->bo = ttm_bo_reference(&res->backup->base);
+       list_add_tail(&val_buf->head, &val_list);
+       ret = ttm_eu_reserve_buffers(&val_list);
+       if (unlikely(ret != 0))
+               goto out_no_reserve;
+
+       if (res->func->needs_backup && list_empty(&res->mob_head))
+               return 0;
+
+       backup_dirty = res->backup_dirty;
+       ret = ttm_bo_validate(&res->backup->base,
+                             res->func->backup_placement,
+                             true, false);
+
+       if (unlikely(ret != 0))
+               goto out_no_validate;
+
+       return 0;
+
+out_no_validate:
+       ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+       ttm_bo_unref(&val_buf->bo);
+       if (backup_dirty)
+               vmw_dmabuf_unreference(&res->backup);
+
+       return ret;
+}
+
+/**
+ * vmw_resource_reserve - Reserve a resource for command submission
+ *
+ * @res:            The resource to reserve.
+ *
+ * This function takes the resource off the LRU list and makes sure
+ * a backup buffer is present for guest-backed resources. However,
+ * the buffer may not be bound to the resource at this point.
+ *
+ */
+int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       int ret;
+
+       write_lock(&dev_priv->resource_lock);
+       list_del_init(&res->lru_head);
+       write_unlock(&dev_priv->resource_lock);
+
+       if (res->func->needs_backup && res->backup == NULL &&
+           !no_backup) {
+               ret = vmw_resource_buf_alloc(res, true);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * vmw_resource_backoff_reservation - Unreserve and unreference a
+ *                                    backup buffer
+ *
+ * @val_buf:        Backup buffer information.
+ */
+void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+{
+       struct list_head val_list;
+
+       if (likely(val_buf->bo == NULL))
+               return;
+
+       INIT_LIST_HEAD(&val_list);
+       list_add_tail(&val_buf->head, &val_list);
+       ttm_eu_backoff_reservation(&val_list);
+       ttm_bo_unref(&val_buf->bo);
+}
+
+/**
+ * vmw_resource_do_evict - Evict a resource, and transfer its data
+ *                         to a backup buffer.
+ *
+ * @res:            The resource to evict.
+ */
+int vmw_resource_do_evict(struct vmw_resource *res)
+{
+       struct ttm_validate_buffer val_buf;
+       const struct vmw_res_func *func = res->func;
+       int ret;
+
+       BUG_ON(!func->may_evict);
+
+       val_buf.bo = NULL;
+       ret = vmw_resource_check_buffer(res, true, &val_buf);
+       if (unlikely(ret != 0))
+               return ret;
+
+       if (unlikely(func->unbind != NULL &&
+                    (!func->needs_backup || !list_empty(&res->mob_head)))) {
+               ret = func->unbind(res, res->res_dirty, &val_buf);
+               if (unlikely(ret != 0))
+                       goto out_no_unbind;
+               list_del_init(&res->mob_head);
+       }
+       ret = func->destroy(res);
+       res->backup_dirty = true;
+       res->res_dirty = false;
+out_no_unbind:
+       vmw_resource_backoff_reservation(&val_buf);
+
+       return ret;
+}
+
+
+/**
+ * vmw_resource_validate - Make a resource up-to-date and visible
+ *                         to the device.
+ *
+ * @res:            The resource to make visible to the device.
+ *
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
+ * be reserved and validated.
+ * On hardware resource shortage, this function will repeatedly evict
+ * resources of the same type until the validation succeeds.
+ */
+int vmw_resource_validate(struct vmw_resource *res)
+{
+       int ret;
+       struct vmw_resource *evict_res;
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
+       struct ttm_validate_buffer val_buf;
+
+       if (likely(!res->func->may_evict))
+               return 0;
+
+       val_buf.bo = NULL;
+       if (res->backup)
+               val_buf.bo = &res->backup->base;
+       do {
+               ret = vmw_resource_do_validate(res, &val_buf);
+               if (likely(ret != -EBUSY))
+                       break;
+
+               write_lock(&dev_priv->resource_lock);
+               if (list_empty(lru_list) || !res->func->may_evict) {
+                       DRM_ERROR("Out of device device id entries "
+                                 "for %s.\n", res->func->type_name);
+                       ret = -EBUSY;
+                       write_unlock(&dev_priv->resource_lock);
+                       break;
+               }
+
+               evict_res = vmw_resource_reference
+                       (list_first_entry(lru_list, struct vmw_resource,
+                                         lru_head));
+               list_del_init(&evict_res->lru_head);
+
+               write_unlock(&dev_priv->resource_lock);
+               vmw_resource_do_evict(evict_res);
+               vmw_resource_unreference(&evict_res);
+       } while (1);
+
+       if (unlikely(ret != 0))
+               goto out_no_validate;
+       else if (!res->func->needs_backup && res->backup) {
+               list_del_init(&res->mob_head);
+               vmw_dmabuf_unreference(&res->backup);
+       }
+
+       return 0;
+
+out_no_validate:
+       return ret;
+}
+
+/**
+ * vmw_fence_single_bo - Utility function to fence a single TTM buffer
+ *                       object without unreserving it.
+ *
+ * @bo:             Pointer to the struct ttm_buffer_object to fence.
+ * @fence:          Pointer to the fence. If NULL, this function will
+ *                  insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+                        struct vmw_fence_obj *fence)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_driver *driver = bdev->driver;
+       struct vmw_fence_obj *old_fence_obj;
+       struct vmw_private *dev_priv =
+               container_of(bdev, struct vmw_private, bdev);
+
+       if (fence == NULL)
+               vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+       else
+               driver->sync_obj_ref(fence);
+
+       spin_lock(&bdev->fence_lock);
+
+       old_fence_obj = bo->sync_obj;
+       bo->sync_obj = fence;
+
+       spin_unlock(&bdev->fence_lock);
+
+       if (old_fence_obj)
+               vmw_fence_obj_unreference(&old_fence_obj);
+}
+
+/**
+ * vmw_resource_move_notify - TTM move_notify_callback
+ *
+ * @bo:             The TTM buffer object about to move.
+ * @mem:            The struct ttm_mem_reg indicating to what memory
+ *                  region the move is taking place.
+ *
+ * For now does nothing.
+ */
+void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+                             struct ttm_mem_reg *mem)
+{
+}
+
+/**
+ * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
+ *
+ * @res:            The resource being queried.
+ */
+bool vmw_resource_needs_backup(const struct vmw_resource *res)
+{
+       return res->func->needs_backup;
+}
+
+/**
+ * vmw_resource_evict_type - Evict all resources of a specific type
+ *
+ * @dev_priv:       Pointer to a device private struct
+ * @type:           The resource type to evict
+ *
+ * To avoid thrashing starvation or as part of the hibernation sequence,
+ * evict all evictable resources of a specific type.
+ */
+static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+                                   enum vmw_res_type type)
+{
+       struct list_head *lru_list = &dev_priv->res_lru[type];
+       struct vmw_resource *evict_res;
+
+       do {
+               write_lock(&dev_priv->resource_lock);
+
+               if (list_empty(lru_list))
+                       goto out_unlock;
+
+               evict_res = vmw_resource_reference(
+                       list_first_entry(lru_list, struct vmw_resource,
+                                        lru_head));
+               list_del_init(&evict_res->lru_head);
+               write_unlock(&dev_priv->resource_lock);
+               vmw_resource_do_evict(evict_res);
+               vmw_resource_unreference(&evict_res);
+       } while (1);
+
+out_unlock:
+       write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_evict_all - Evict all evictable resources
+ *
+ * @dev_priv:       Pointer to a device private struct
+ *
+ * To avoid thrashing starvation or as part of the hibernation sequence,
+ * evict all evictable resources. In particular this means that all
+ * guest-backed resources that are registered with the device are
+ * evicted and the OTable becomes clean.
+ */
+void vmw_resource_evict_all(struct vmw_private *dev_priv)
+{
+       enum vmw_res_type type;
+
+       mutex_lock(&dev_priv->cmdbuf_mutex);
+
+       for (type = 0; type < vmw_res_max; ++type)
+               vmw_resource_evict_type(dev_priv, type);
+
+       mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
new file mode 100644 (file)
index 0000000..f3adeed
--- /dev/null
@@ -0,0 +1,84 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_RESOURCE_PRIV_H_
+#define _VMWGFX_RESOURCE_PRIV_H_
+
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_user_resource_conv - Identify a derived user-exported resource
+ * type and provide a function to convert its ttm_base_object pointer to
+ * a struct vmw_resource
+ */
+struct vmw_user_resource_conv {
+       enum ttm_object_type object_type;
+       struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
+       void (*res_free) (struct vmw_resource *res);
+};
+
+/**
+ * struct vmw_res_func - members and functions common for a resource type
+ *
+ * @res_type:          Enum that identifies the lru list to use for eviction.
+ * @needs_backup:      Whether the resource is guest-backed and needs
+ *                     persistent buffer storage.
+ * @type_name:         String that identifies the resource type.
+ * @backup_placement:  TTM placement for backup buffers.
+ * @may_evict:         Whether the resource may be evicted.
+ * @create:            Create a hardware resource.
+ * @destroy:           Destroy a hardware resource.
+ * @bind:              Bind a hardware resource to persistent buffer storage.
+ * @unbind:            Unbind a hardware resource from persistent
+ *                     buffer storage.
+ */
+
+struct vmw_res_func {
+       enum vmw_res_type res_type;
+       bool needs_backup;
+       const char *type_name;
+       struct ttm_placement *backup_placement;
+       bool may_evict;
+
+       int (*create) (struct vmw_resource *res);
+       int (*destroy) (struct vmw_resource *res);
+       int (*bind) (struct vmw_resource *res,
+                    struct ttm_validate_buffer *val_buf);
+       int (*unbind) (struct vmw_resource *res,
+                      bool readback,
+                      struct ttm_validate_buffer *val_buf);
+};
+
+int vmw_resource_alloc_id(struct vmw_resource *res);
+void vmw_resource_release_id(struct vmw_resource *res);
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+                     bool delay_id,
+                     void (*res_free) (struct vmw_resource *res),
+                     const struct vmw_res_func *func);
+void vmw_resource_activate(struct vmw_resource *res,
+                          void (*hw_destroy) (struct vmw_resource *));
+#endif
index 60f1285..26387c3 100644 (file)
@@ -468,7 +468,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 
        drm_mode_crtc_set_gamma_size(crtc, 256);
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.dirty_info_property,
                                      1);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
new file mode 100644 (file)
index 0000000..5828143
--- /dev/null
@@ -0,0 +1,893 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "svga3d_surfacedefs.h"
+
+/**
+ * struct vmw_user_surface - User-space visible surface resource
+ *
+ * @base:           The TTM base object handling user-space visibility.
+ * @srf:            The surface metadata.
+ * @size:           TTM accounting size for the surface.
+ */
+struct vmw_user_surface {
+       struct ttm_base_object base;
+       struct vmw_surface srf;
+       uint32_t size;
+       uint32_t backup_handle;
+};
+
+/**
+ * struct vmw_surface_offset - Backing store mip level offset info
+ *
+ * @face:           Surface face.
+ * @mip:            Mip level.
+ * @bo_offset:      Offset into backing store of this mip level.
+ *
+ */
+struct vmw_surface_offset {
+       uint32_t face;
+       uint32_t mip;
+       uint32_t bo_offset;
+};
+
+static void vmw_user_surface_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base);
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_create(struct vmw_resource *res);
+static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+
+static const struct vmw_user_resource_conv user_surface_conv = {
+       .object_type = VMW_RES_SURFACE,
+       .base_obj_to_res = vmw_user_surface_base_to_res,
+       .res_free = vmw_user_surface_free
+};
+
+const struct vmw_user_resource_conv *user_surface_converter =
+       &user_surface_conv;
+
+
+static uint64_t vmw_user_surface_size;
+
+static const struct vmw_res_func vmw_legacy_surface_func = {
+       .res_type = vmw_res_surface,
+       .needs_backup = false,
+       .may_evict = true,
+       .type_name = "legacy surfaces",
+       .backup_placement = &vmw_srf_placement,
+       .create = &vmw_legacy_srf_create,
+       .destroy = &vmw_legacy_srf_destroy,
+       .bind = &vmw_legacy_srf_bind,
+       .unbind = &vmw_legacy_srf_unbind
+};
+
+/**
+ * struct vmw_surface_dma - SVGA3D DMA command
+ */
+struct vmw_surface_dma {
+       SVGA3dCmdHeader header;
+       SVGA3dCmdSurfaceDMA body;
+       SVGA3dCopyBox cb;
+       SVGA3dCmdSurfaceDMASuffix suffix;
+};
+
+/**
+ * struct vmw_surface_define - SVGA3D Surface Define command
+ */
+struct vmw_surface_define {
+       SVGA3dCmdHeader header;
+       SVGA3dCmdDefineSurface body;
+};
+
+/**
+ * struct vmw_surface_destroy - SVGA3D Surface Destroy command
+ */
+struct vmw_surface_destroy {
+       SVGA3dCmdHeader header;
+       SVGA3dCmdDestroySurface body;
+};
+
+
+/**
+ * vmw_surface_dma_size - Compute fifo size for a dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface dma command for backup or
+ * restoration of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+{
+       return srf->num_sizes * sizeof(struct vmw_surface_dma);
+}
+
+
+/**
+ * vmw_surface_define_size - Compute fifo size for a surface define command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface define command for the definition
+ * of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+{
+       return sizeof(struct vmw_surface_define) + srf->num_sizes *
+               sizeof(SVGA3dSize);
+}
+
+
+/**
+ * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ *
+ * Computes the required size for a surface destroy command for the destruction
+ * of a hw surface.
+ */
+static inline uint32_t vmw_surface_destroy_size(void)
+{
+       return sizeof(struct vmw_surface_destroy);
+}
+
+/**
+ * vmw_surface_destroy_encode - Encode a surface_destroy command.
+ *
+ * @id: The surface id
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_destroy_encode(uint32_t id,
+                                      void *cmd_space)
+{
+       struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
+               cmd_space;
+
+       cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.sid = id;
+}
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_define_encode(const struct vmw_surface *srf,
+                                     void *cmd_space)
+{
+       struct vmw_surface_define *cmd = (struct vmw_surface_define *)
+               cmd_space;
+       struct drm_vmw_size *src_size;
+       SVGA3dSize *cmd_size;
+       uint32_t cmd_len;
+       int i;
+
+       cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+       cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
+       cmd->header.size = cmd_len;
+       cmd->body.sid = srf->res.id;
+       cmd->body.surfaceFlags = srf->flags;
+       cmd->body.format = cpu_to_le32(srf->format);
+       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+               cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+
+       cmd += 1;
+       cmd_size = (SVGA3dSize *) cmd;
+       src_size = srf->sizes;
+
+       for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+               cmd_size->width = src_size->width;
+               cmd_size->height = src_size->height;
+               cmd_size->depth = src_size->depth;
+       }
+}
+
+/**
+ * vmw_surface_dma_encode - Encode a surface_dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
+ * should be placed or read from.
+ * @to_surface: Boolean whether to DMA to the surface or from the surface.
+ */
+static void vmw_surface_dma_encode(struct vmw_surface *srf,
+                                  void *cmd_space,
+                                  const SVGAGuestPtr *ptr,
+                                  bool to_surface)
+{
+       uint32_t i;
+       struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
+       const struct svga3d_surface_desc *desc =
+               svga3dsurface_get_desc(srf->format);
+
+       for (i = 0; i < srf->num_sizes; ++i) {
+               SVGA3dCmdHeader *header = &cmd->header;
+               SVGA3dCmdSurfaceDMA *body = &cmd->body;
+               SVGA3dCopyBox *cb = &cmd->cb;
+               SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
+               const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
+               const struct drm_vmw_size *cur_size = &srf->sizes[i];
+
+               header->id = SVGA_3D_CMD_SURFACE_DMA;
+               header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
+
+               body->guest.ptr = *ptr;
+               body->guest.ptr.offset += cur_offset->bo_offset;
+               body->guest.pitch = svga3dsurface_calculate_pitch(desc,
+                                                                 cur_size);
+               body->host.sid = srf->res.id;
+               body->host.face = cur_offset->face;
+               body->host.mipmap = cur_offset->mip;
+               body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
+                                 SVGA3D_READ_HOST_VRAM);
+               cb->x = 0;
+               cb->y = 0;
+               cb->z = 0;
+               cb->srcx = 0;
+               cb->srcy = 0;
+               cb->srcz = 0;
+               cb->w = cur_size->width;
+               cb->h = cur_size->height;
+               cb->d = cur_size->depth;
+
+               suffix->suffixSize = sizeof(*suffix);
+               suffix->maximumOffset =
+                       svga3dsurface_get_image_buffer_size(desc, cur_size,
+                                                           body->guest.pitch);
+               suffix->flags.discard = 0;
+               suffix->flags.unsynchronized = 0;
+               suffix->flags.reserved = 0;
+               ++cmd;
+       }
+}
+
+
+/**
+ * vmw_hw_surface_destroy - destroy a Device surface
+ *
+ * @res:        Pointer to a struct vmw_resource embedded in a struct
+ *              vmw_surface.
+ *
+ * Destroys the device surface associated with a struct vmw_surface if
+ * any, and adjusts accounting and resource count accordingly.
+ */
+static void vmw_hw_surface_destroy(struct vmw_resource *res)
+{
+
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_surface *srf;
+       void *cmd;
+
+       if (res->id != -1) {
+
+               cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
+               if (unlikely(cmd == NULL)) {
+                       DRM_ERROR("Failed reserving FIFO space for surface "
+                                 "destruction.\n");
+                       return;
+               }
+
+               vmw_surface_destroy_encode(res->id, cmd);
+               vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+
+               /*
+                * TODO: make used_memory_size atomic, or use a separate
+                * lock, to avoid taking dev_priv::cmdbuf_mutex in the
+                * destroy path.
+                */
+
+               mutex_lock(&dev_priv->cmdbuf_mutex);
+               srf = vmw_res_to_srf(res);
+               dev_priv->used_memory_size -= res->backup_size;
+               mutex_unlock(&dev_priv->cmdbuf_mutex);
+       }
+       vmw_3d_resource_dec(dev_priv, false);
+}
+
+/**
+ * vmw_legacy_srf_create - Create a device surface as part of the
+ * resource validation process.
+ *
+ * @res: Pointer to a struct vmw_surface.
+ *
+ * Creates the device surface if it doesn't already have a hw id.
+ *
+ * Returns -EBUSY if there wasn't sufficient device resources to
+ * complete the validation. Retry after freeing up resources.
+ *
+ * May return other errors if the kernel is out of guest resources.
+ */
+static int vmw_legacy_srf_create(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_surface *srf;
+       uint32_t submit_size;
+       uint8_t *cmd;
+       int ret;
+
+       if (likely(res->id != -1))
+               return 0;
+
+       srf = vmw_res_to_srf(res);
+       if (unlikely(dev_priv->used_memory_size + res->backup_size >=
+                    dev_priv->memory_size))
+               return -EBUSY;
+
+       /*
+        * Alloc id for the resource.
+        */
+
+       ret = vmw_resource_alloc_id(res);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed to allocate a surface id.\n");
+               goto out_no_id;
+       }
+
+       if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
+               ret = -EBUSY;
+               goto out_no_fifo;
+       }
+
+       /*
+        * Encode the surface define command.
+        */
+
+       submit_size = vmw_surface_define_size(srf);
+       cmd = vmw_fifo_reserve(dev_priv, submit_size);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "creation.\n");
+               ret = -ENOMEM;
+               goto out_no_fifo;
+       }
+
+       vmw_surface_define_encode(srf, cmd);
+       vmw_fifo_commit(dev_priv, submit_size);
+       /*
+        * Surface memory usage accounting.
+        */
+
+       dev_priv->used_memory_size += res->backup_size;
+       return 0;
+
+out_no_fifo:
+       vmw_resource_release_id(res);
+out_no_id:
+       return ret;
+}
+
+/**
+ * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
+ *
+ * @res:            Pointer to a struct vmw_res embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ * @bind:           Boolean whether to DMA to the surface (true) or from it.
+ *
+ * Transfer backup data to or from a legacy surface as part of the
+ * validation process.
+ * May return other errors if the kernel is out of guest resources.
+ * The backup buffer will be fenced or idle upon successful completion,
+ * and if the surface needs persistent backup storage, the backup buffer
+ * will also be returned reserved iff @bind is true.
+ */
+static int vmw_legacy_srf_dma(struct vmw_resource *res,
+                             struct ttm_validate_buffer *val_buf,
+                             bool bind)
+{
+       SVGAGuestPtr ptr;
+       struct vmw_fence_obj *fence;
+       uint32_t submit_size;
+       struct vmw_surface *srf = vmw_res_to_srf(res);
+       uint8_t *cmd;
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       BUG_ON(val_buf->bo == NULL);
+
+       submit_size = vmw_surface_dma_size(srf);
+       cmd = vmw_fifo_reserve(dev_priv, submit_size);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "DMA.\n");
+               return -ENOMEM;
+       }
+       vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
+       vmw_surface_dma_encode(srf, cmd, &ptr, bind);
+
+       vmw_fifo_commit(dev_priv, submit_size);
+
+       /*
+        * Create a fence object and fence the backup buffer.
+        */
+
+       (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+                                         &fence, NULL);
+
+       vmw_fence_single_bo(val_buf->bo, fence);
+
+       if (likely(fence != NULL))
+               vmw_fence_obj_unreference(&fence);
+
+       return 0;
+}
+
+/**
+ * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
+ *                       surface validation process.
+ *
+ * @res:            Pointer to a struct vmw_res embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ *
+ * This function will copy backup data to the surface only if the
+ * backup buffer is dirty; otherwise it does nothing.
+ */
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf)
+{
+       if (!res->backup_dirty)
+               return 0;
+
+       return vmw_legacy_srf_dma(res, val_buf, true);
+}
+
+
+/**
+ * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
+ *                         surface eviction process.
+ *
+ * @res:            Pointer to a struct vmw_res embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ *
+ * This function will copy backup data from the surface if @readback is true.
+ */
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf)
+{
+       if (unlikely(readback))
+               return vmw_legacy_srf_dma(res, val_buf, false);
+       return 0;
+}
+
+/**
+ * vmw_legacy_srf_destroy - Destroy a device surface as part of a
+ *                          resource eviction process.
+ *
+ * @res:            Pointer to a struct vmw_res embedded in a struct
+ *                  vmw_surface.
+ */
+static int vmw_legacy_srf_destroy(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       uint32_t submit_size;
+       uint8_t *cmd;
+
+       BUG_ON(res->id == -1);
+
+       /*
+        * Encode the surface destroy command.
+        */
+
+       submit_size = vmw_surface_destroy_size();
+       cmd = vmw_fifo_reserve(dev_priv, submit_size);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "eviction.\n");
+               return -ENOMEM;
+       }
+
+       vmw_surface_destroy_encode(res->id, cmd);
+       vmw_fifo_commit(dev_priv, submit_size);
+
+       /*
+        * Surface memory usage accounting.
+        */
+
+       dev_priv->used_memory_size -= res->backup_size;
+
+       /*
+        * Release the surface ID.
+        */
+
+       vmw_resource_release_id(res);
+
+       return 0;
+}
+
+
+/**
+ * vmw_surface_init - initialize a struct vmw_surface
+ *
+ * @dev_priv:       Pointer to a device private struct.
+ * @srf:            Pointer to the struct vmw_surface to initialize.
+ * @res_free:       Pointer to a resource destructor used to free
+ *                  the object. Returns 0 on success, error code otherwise.
+ */
+static int vmw_surface_init(struct vmw_private *dev_priv,
+                           struct vmw_surface *srf,
+                           void (*res_free) (struct vmw_resource *res))
+{
+       int ret;
+       struct vmw_resource *res = &srf->res;
+
+       BUG_ON(res_free == NULL);
+       (void) vmw_3d_resource_inc(dev_priv, false);
+       ret = vmw_resource_init(dev_priv, res, true, res_free,
+                               &vmw_legacy_surface_func);
+
+       if (unlikely(ret != 0)) {
+               vmw_3d_resource_dec(dev_priv, false);
+               res_free(res);
+               return ret;
+       }
+
+       /*
+        * The surface won't be visible to hardware until a
+        * surface validate.
+        */
+
+       vmw_resource_activate(res, vmw_hw_surface_destroy);
+       return ret;
+}
+
+/**
+ * vmw_user_surface_base_to_res - TTM base object to resource converter for
+ *                                user-visible surfaces
+ *
+ * @base:           Pointer to a TTM base object
+ *
+ * Returns the struct vmw_resource embedded in a struct vmw_surface
+ * for the user-visible object identified by the TTM base object @base.
+ */
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base)
+{
+       return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+}
+
+/**
+ * vmw_user_surface_free - User-visible surface resource destructor
+ *
+ * @res:            A struct vmw_resource embedded in a struct vmw_surface.
+ */
+static void vmw_user_surface_free(struct vmw_resource *res)
+{
+       struct vmw_surface *srf = vmw_res_to_srf(res);
+       struct vmw_user_surface *user_srf =
+           container_of(srf, struct vmw_user_surface, srf);
+       struct vmw_private *dev_priv = srf->res.dev_priv;
+       uint32_t size = user_srf->size;
+
+       kfree(srf->offsets);
+       kfree(srf->sizes);
+       kfree(srf->snooper.image);
+       ttm_base_object_kfree(user_srf, base);
+       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_user_surface_base_release - User-visible surface TTM base object
+ *                                 destructor
+ *
+ * @p_base:         Pointer to a pointer to a TTM base object
+ *                  embedded in a struct vmw_user_surface.
+ *
+ * Drops the base object's reference on its resource, and the
+ * pointer pointed to by *p_base is set to NULL.
+ */
+static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+{
+       struct ttm_base_object *base = *p_base;
+       struct vmw_user_surface *user_srf =
+           container_of(base, struct vmw_user_surface, base);
+       struct vmw_resource *res = &user_srf->srf.res;
+
+       *p_base = NULL;
+       vmw_resource_unreference(&res);
+}
+
+/**
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
+ *                                  the user surface destroy functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+{
+       struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+       return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
+/**
+ * vmw_surface_define_ioctl - Ioctl function implementing
+ *                                  the user surface define functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_user_surface *user_srf;
+       struct vmw_surface *srf;
+       struct vmw_resource *res;
+       struct vmw_resource *tmp;
+       union drm_vmw_surface_create_arg *arg =
+           (union drm_vmw_surface_create_arg *)data;
+       struct drm_vmw_surface_create_req *req = &arg->req;
+       struct drm_vmw_surface_arg *rep = &arg->rep;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct drm_vmw_size __user *user_sizes;
+       int ret;
+       int i, j;
+       uint32_t cur_bo_offset;
+       struct drm_vmw_size *cur_size;
+       struct vmw_surface_offset *cur_offset;
+       uint32_t num_sizes;
+       uint32_t size;
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
+       const struct svga3d_surface_desc *desc;
+
+       if (unlikely(vmw_user_surface_size == 0))
+               vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+                       128;
+
+       num_sizes = 0;
+       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+               num_sizes += req->mip_levels[i];
+
+       /* Reject degenerate (zero-size) and oversized requests. */
+       if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+           DRM_VMW_MAX_MIP_LEVELS || num_sizes == 0)
+               return -EINVAL;
+
+       size = vmw_user_surface_size + 128 +
+               ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
+               ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
+
+
+       desc = svga3dsurface_get_desc(req->format);
+       if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+               DRM_ERROR("Invalid surface format for surface creation.\n");
+               return -EINVAL;
+       }
+
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+                                  size, false, true);
+       if (unlikely(ret != 0)) {
+               if (ret != -ERESTARTSYS)
+                       DRM_ERROR("Out of graphics memory for surface"
+                                 " creation.\n");
+               goto out_unlock;
+       }
+
+       user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+       if (unlikely(user_srf == NULL)) {
+               ret = -ENOMEM;
+               goto out_no_user_srf;
+       }
+
+       srf = &user_srf->srf;
+       res = &srf->res;
+
+       srf->flags = req->flags;
+       srf->format = req->format;
+       srf->scanout = req->scanout;
+
+       memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+       srf->num_sizes = num_sizes;
+       user_srf->size = size;
+
+       srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
+       if (unlikely(srf->sizes == NULL)) {
+               ret = -ENOMEM;
+               goto out_no_sizes;
+       }
+       srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
+                              GFP_KERNEL);
+       if (unlikely(srf->offsets == NULL)) {
+               ret = -ENOMEM;
+               goto out_no_offsets;
+       }
+
+       user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+           req->size_addr;
+
+       ret = copy_from_user(srf->sizes, user_sizes,
+                            srf->num_sizes * sizeof(*srf->sizes));
+       if (unlikely(ret != 0)) {
+               ret = -EFAULT;
+               goto out_no_copy;
+       }
+
+       srf->base_size = *srf->sizes;
+       srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+       srf->multisample_count = 1;
+
+       cur_bo_offset = 0;
+       cur_offset = srf->offsets;
+       cur_size = srf->sizes;
+
+       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+               for (j = 0; j < srf->mip_levels[i]; ++j) {
+                       uint32_t stride = svga3dsurface_calculate_pitch
+                               (desc, cur_size);
+
+                       cur_offset->face = i;
+                       cur_offset->mip = j;
+                       cur_offset->bo_offset = cur_bo_offset;
+                       cur_bo_offset += svga3dsurface_get_image_buffer_size
+                               (desc, cur_size, stride);
+                       ++cur_offset;
+                       ++cur_size;
+               }
+       }
+       res->backup_size = cur_bo_offset;
+       if (srf->scanout &&
+           srf->num_sizes == 1 &&
+           srf->sizes[0].width == 64 &&
+           srf->sizes[0].height == 64 &&
+           srf->format == SVGA3D_A8R8G8B8) {
+
+               srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+               /* clear the image */
+               if (srf->snooper.image) {
+                       memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+               } else {
+                       DRM_ERROR("Failed to allocate cursor_image\n");
+                       ret = -ENOMEM;
+                       goto out_no_copy;
+               }
+       } else {
+               srf->snooper.image = NULL;
+       }
+       srf->snooper.crtc = NULL;
+
+       user_srf->base.shareable = false;
+       user_srf->base.tfile = NULL;
+
+       /*
+        * From this point, the generic resource management functions
+        * destroy the object on failure.
+        */
+
+       ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+       if (unlikely(ret != 0))
+               goto out_unlock;
+
+       tmp = vmw_resource_reference(&srf->res);
+       ret = ttm_base_object_init(tfile, &user_srf->base,
+                                  req->shareable, VMW_RES_SURFACE,
+                                  &vmw_user_surface_base_release, NULL);
+
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&tmp);
+               vmw_resource_unreference(&res);
+               goto out_unlock;
+       }
+
+       rep->sid = user_srf->base.hash.key;
+       vmw_resource_unreference(&res);
+
+       ttm_read_unlock(&vmaster->lock);
+       return 0;
+out_no_copy:
+       kfree(srf->offsets);
+out_no_offsets:
+       kfree(srf->sizes);
+out_no_sizes:
+       ttm_base_object_kfree(user_srf, base);
+out_no_user_srf:
+       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+       ttm_read_unlock(&vmaster->lock);
+       return ret;
+}
+
+/**
+ * vmw_surface_reference_ioctl - Ioctl function implementing
+ *                                  the user surface reference functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       union drm_vmw_surface_reference_arg *arg =
+           (union drm_vmw_surface_reference_arg *)data;
+       struct drm_vmw_surface_arg *req = &arg->req;
+       struct drm_vmw_surface_create_req *rep = &arg->rep;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_surface *srf;
+       struct vmw_user_surface *user_srf;
+       struct drm_vmw_size __user *user_sizes;
+       struct ttm_base_object *base;
+       int ret = -EINVAL;
+
+       base = ttm_base_object_lookup(tfile, req->sid);
+       if (unlikely(base == NULL)) {
+               DRM_ERROR("Could not find surface to reference.\n");
+               return -EINVAL;
+       }
+
+       if (unlikely(base->object_type != VMW_RES_SURFACE))
+               goto out_bad_resource;
+
+       user_srf = container_of(base, struct vmw_user_surface, base);
+       srf = &user_srf->srf;
+
+       ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Could not add a reference to a surface.\n");
+               goto out_no_reference;
+       }
+
+       rep->flags = srf->flags;
+       rep->format = srf->format;
+       memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+       user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+           rep->size_addr;
+
+       if (user_sizes)
+               ret = copy_to_user(user_sizes, srf->sizes,
+                                  srf->num_sizes * sizeof(*srf->sizes));
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("copy_to_user failed %p %u\n",
+                         user_sizes, srf->num_sizes);
+               ret = -EFAULT;
+       }
+out_bad_resource:
+out_no_reference:
+       ttm_base_object_unref(&base);
+
+       return ret;
+}
index f676c01..6fcd466 100644 (file)
@@ -46,9 +46,9 @@ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                rdesc[559] = 0x45;
        }
        /* the same as above (s/usage/physical/) */
-       if ((quirks & MS_RDESC_3K) && *rsize == 106 &&
-                       !memcmp((char []){ 0x19, 0x00, 0x29, 0xff },
-                               &rdesc[94], 4)) {
+       if ((quirks & MS_RDESC_3K) && *rsize == 106 && rdesc[94] == 0x19 &&
+                       rdesc[95] == 0x00 && rdesc[96] == 0x29 &&
+                       rdesc[97] == 0xff) {
                rdesc[94] = 0x35;
                rdesc[96] = 0x45;
        }
index 17d15bb..7c47fc3 100644 (file)
@@ -42,7 +42,6 @@ static struct cdev hidraw_cdev;
 static struct class *hidraw_class;
 static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES];
 static DEFINE_MUTEX(minors_lock);
-static void drop_ref(struct hidraw *hid, int exists_bit);
 
 static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
 {
@@ -114,7 +113,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
        __u8 *buf;
        int ret = 0;
 
-       if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+       if (!hidraw_table[minor]) {
                ret = -ENODEV;
                goto out;
        }
@@ -262,7 +261,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
        }
 
        mutex_lock(&minors_lock);
-       if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+       if (!hidraw_table[minor]) {
                err = -ENODEV;
                goto out_unlock;
        }
@@ -299,12 +298,36 @@ out:
 static int hidraw_release(struct inode * inode, struct file * file)
 {
        unsigned int minor = iminor(inode);
+       struct hidraw *dev;
        struct hidraw_list *list = file->private_data;
+       int ret;
+       int i;
+
+       mutex_lock(&minors_lock);
+       if (!hidraw_table[minor]) {
+               ret = -ENODEV;
+               goto unlock;
+       }
 
-       drop_ref(hidraw_table[minor], 0);
        list_del(&list->node);
+       dev = hidraw_table[minor];
+       if (!--dev->open) {
+               if (list->hidraw->exist) {
+                       hid_hw_power(dev->hid, PM_HINT_NORMAL);
+                       hid_hw_close(dev->hid);
+               } else {
+                       kfree(list->hidraw);
+               }
+       }
+
+       for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
+               kfree(list->buffer[i].value);
        kfree(list);
-       return 0;
+       ret = 0;
+unlock:
+       mutex_unlock(&minors_lock);
+
+       return ret;
 }
 
 static long hidraw_ioctl(struct file *file, unsigned int cmd,
@@ -506,7 +529,21 @@ EXPORT_SYMBOL_GPL(hidraw_connect);
 void hidraw_disconnect(struct hid_device *hid)
 {
        struct hidraw *hidraw = hid->hidraw;
-       drop_ref(hidraw, 1);
+
+       mutex_lock(&minors_lock);
+       hidraw->exist = 0;
+
+       device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+
+       hidraw_table[hidraw->minor] = NULL;
+
+       if (hidraw->open) {
+               hid_hw_close(hid);
+               wake_up_interruptible(&hidraw->wait);
+       } else {
+               kfree(hidraw);
+       }
+       mutex_unlock(&minors_lock);
 }
 EXPORT_SYMBOL_GPL(hidraw_disconnect);
 
@@ -555,23 +592,3 @@ void hidraw_exit(void)
        unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES);
 
 }
-
-static void drop_ref(struct hidraw *hidraw, int exists_bit)
-{
-       mutex_lock(&minors_lock);
-       if (exists_bit) {
-               hid_hw_close(hidraw->hid);
-               hidraw->exist = 0;
-               if (hidraw->open)
-                       wake_up_interruptible(&hidraw->wait);
-       } else {
-               --hidraw->open;
-       }
-
-       if (!hidraw->open && !hidraw->exist) {
-               device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
-               hidraw_table[hidraw->minor] = NULL;
-               kfree(hidraw);
-       }
-       mutex_unlock(&minors_lock);
-}
index a227be4..520e5bf 100644 (file)
@@ -32,7 +32,7 @@
  * ASB100-A supports pwm1, while plain ASB100 does not.  There is no known
  * way for the driver to tell which one is there.
  *
- * Chip        #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
+ * Chip                #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
  * asb100      7       3       1       4       0x31    0x0694  yes     no
  */
 
index 1821b74..de3c7e0 100644 (file)
@@ -2083,6 +2083,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
        mutex_init(&data->lock);
        mutex_init(&data->update_lock);
        data->name = w83627ehf_device_names[sio_data->kind];
+       data->bank = 0xff;              /* Force initial bank selection */
        platform_set_drvdata(pdev, data);
 
        /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
index 5b1a6a6..af15899 100644 (file)
@@ -25,7 +25,7 @@
 /*
  * Supports following chips:
  *
- * Chip        #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
+ * Chip                #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
  * w83627hf    9       3       2       3       0x20    0x5ca3  no      yes(LPC)
  * w83627thf   7       3       3       3       0x90    0x5ca3  no      yes(LPC)
  * w83637hf    7       3       3       3       0x80    0x5ca3  no      yes(LPC)
index 5a5046d..20f11d3 100644 (file)
@@ -24,7 +24,7 @@
 /*
  * Supports following chips:
  *
- * Chip        #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
+ * Chip                #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
  * as99127f    7       3       0       3       0x31    0x12c3  yes     no
  * as99127f rev.2 (type_name = as99127f)       0x31    0x5ca3  yes     no
  * w83781d     7       3       0       3       0x10-1  0x5ca3  yes     yes
index 39ab7bc..ed397c6 100644 (file)
@@ -22,7 +22,7 @@
 /*
  * Supports following chips:
  *
- * Chip        #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
+ * Chip                #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
  * w83791d     10      5       5       3       0x71    0x5ca3  yes     no
  *
  * The w83791d chip appears to be part way between the 83781d and the
index 0536452..301942d 100644 (file)
@@ -31,7 +31,7 @@
 /*
  * Supports following chips:
  *
- * Chip        #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
+ * Chip                #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
  * w83792d     9       7       7       3       0x7a    0x5ca3  yes     no
  */
 
index f0e8286..79710bc 100644 (file)
@@ -20,7 +20,7 @@
 /*
  * Supports following chips:
  *
- * Chip        #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
+ * Chip                #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
  * w83l786ng   3       2       2       2       0x7b    0x5ca3  yes     no
  */
 
index aa59a25..c02bf20 100644 (file)
@@ -39,6 +39,7 @@
 #define        AT91_TWI_STOP           0x0002  /* Send a Stop Condition */
 #define        AT91_TWI_MSEN           0x0004  /* Master Transfer Enable */
 #define        AT91_TWI_SVDIS          0x0020  /* Slave Transfer Disable */
+#define        AT91_TWI_QUICK          0x0040  /* SMBus quick command */
 #define        AT91_TWI_SWRST          0x0080  /* Software Reset */
 
 #define        AT91_TWI_MMR            0x0004  /* Master Mode Register */
@@ -212,7 +213,11 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
 
        INIT_COMPLETION(dev->cmd_complete);
        dev->transfer_status = 0;
-       if (dev->msg->flags & I2C_M_RD) {
+
+       if (!dev->buf_len) {
+               at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
+               at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+       } else if (dev->msg->flags & I2C_M_RD) {
                unsigned start_flags = AT91_TWI_START;
 
                if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
index 286ca19..0670da7 100644 (file)
@@ -287,12 +287,14 @@ read_init_dma_fail:
 select_init_dma_fail:
        dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
 select_init_pio_fail:
+       dmaengine_terminate_all(i2c->dmach);
        return -EINVAL;
 
 /* Write failpath. */
 write_init_dma_fail:
        dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
 write_init_pio_fail:
+       dmaengine_terminate_all(i2c->dmach);
        return -EINVAL;
 }
 
index db31eae..3525c9e 100644 (file)
@@ -43,7 +43,6 @@
 #include <linux/slab.h>
 #include <linux/i2c-omap.h>
 #include <linux/pm_runtime.h>
-#include <linux/pm_qos.h>
 
 /* I2C controller revisions */
 #define OMAP_I2C_OMAP1_REV_2           0x20
@@ -187,8 +186,9 @@ struct omap_i2c_dev {
        int                     reg_shift;      /* bit shift for I2C register addresses */
        struct completion       cmd_complete;
        struct resource         *ioarea;
-       u32                     latency;        /* maximum MPU wkup latency */
-       struct pm_qos_request   pm_qos_request;
+       u32                     latency;        /* maximum mpu wkup latency */
+       void                    (*set_mpu_wkup_lat)(struct device *dev,
+                                                   long latency);
        u32                     speed;          /* Speed of bus in kHz */
        u32                     dtrev;          /* extra revision from DT */
        u32                     flags;
@@ -494,7 +494,9 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
                dev->b_hw = 1; /* Enable hardware fixes */
 
        /* calculate wakeup latency constraint for MPU */
-       dev->latency = (1000000 * dev->threshold) / (1000 * dev->speed / 8);
+       if (dev->set_mpu_wkup_lat != NULL)
+               dev->latency = (1000000 * dev->threshold) /
+                       (1000 * dev->speed / 8);
 }
 
 /*
@@ -522,6 +524,9 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
        dev->buf = msg->buf;
        dev->buf_len = msg->len;
 
+       /* make sure writes to dev->buf_len are ordered */
+       barrier();
+
        omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len);
 
        /* Clear the FIFO Buffers */
@@ -579,7 +584,6 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
         */
        timeout = wait_for_completion_timeout(&dev->cmd_complete,
                                                OMAP_I2C_TIMEOUT);
-       dev->buf_len = 0;
        if (timeout == 0) {
                dev_err(dev->dev, "controller timed out\n");
                omap_i2c_init(dev);
@@ -629,16 +633,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        if (r < 0)
                goto out;
 
-       /*
-        * When waiting for completion of a i2c transfer, we need to
-        * set a wake up latency constraint for the MPU. This is to
-        * ensure quick enough wakeup from idle, when transfer
-        * completes.
-        */
-       if (dev->latency)
-               pm_qos_add_request(&dev->pm_qos_request,
-                                  PM_QOS_CPU_DMA_LATENCY,
-                                  dev->latency);
+       if (dev->set_mpu_wkup_lat != NULL)
+               dev->set_mpu_wkup_lat(dev->dev, dev->latency);
 
        for (i = 0; i < num; i++) {
                r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
@@ -646,8 +642,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
                        break;
        }
 
-       if (dev->latency)
-               pm_qos_remove_request(&dev->pm_qos_request);
+       if (dev->set_mpu_wkup_lat != NULL)
+               dev->set_mpu_wkup_lat(dev->dev, -1);
 
        if (r == 0)
                r = num;
@@ -1104,6 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev)
        } else if (pdata != NULL) {
                dev->speed = pdata->clkrate;
                dev->flags = pdata->flags;
+               dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
                dev->dtrev = pdata->rev;
        }
 
@@ -1159,8 +1156,9 @@ omap_i2c_probe(struct platform_device *pdev)
                        dev->b_hw = 1; /* Enable hardware fixes */
 
                /* calculate wakeup latency constraint for MPU */
-               dev->latency = (1000000 * dev->fifo_size) /
-                              (1000 * dev->speed / 8);
+               if (dev->set_mpu_wkup_lat != NULL)
+                       dev->latency = (1000000 * dev->fifo_size) /
+                                      (1000 * dev->speed / 8);
        }
 
        /* reset ASAP, clearing any IRQs */
index 3e0335f..9d90272 100644 (file)
@@ -806,6 +806,7 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
                        dev_err(i2c->dev, "invalid gpio[%d]: %d\n", idx, gpio);
                        goto free_gpio;
                }
+               i2c->gpios[idx] = gpio;
 
                ret = gpio_request(gpio, "i2c-bus");
                if (ret) {
index 5f097f3..7fa5b24 100644 (file)
@@ -169,7 +169,7 @@ static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
        mux->busses = devm_kzalloc(&pdev->dev,
                                   sizeof(mux->busses) * mux->pdata->bus_count,
                                   GFP_KERNEL);
-       if (!mux->states) {
+       if (!mux->busses) {
                dev_err(&pdev->dev, "Cannot allocate busses\n");
                ret = -ENOMEM;
                goto err;
index c0ec7d4..1abbc17 100644 (file)
@@ -26,10 +26,14 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
  * input_mt_init_slots() - initialize MT input slots
  * @dev: input device supporting MT events and finger tracking
  * @num_slots: number of slots used by the device
+ * @flags: mt tasks to handle in core
  *
  * This function allocates all necessary memory for MT slot handling
  * in the input device, prepares the ABS_MT_SLOT and
  * ABS_MT_TRACKING_ID events for use and sets up appropriate buffers.
+ * Depending on the flags set, it also performs pointer emulation and
+ * frame synchronization.
+ *
  * May be called repeatedly. Returns -EINVAL if attempting to
  * reinitialize with a different number of slots.
  */
index 8f02e3d..4c842c3 100644 (file)
@@ -12,8 +12,8 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #define MOUSEDEV_MINOR_BASE    32
-#define MOUSEDEV_MINORS                32
-#define MOUSEDEV_MIX           31
+#define MOUSEDEV_MINORS                31
+#define MOUSEDEV_MIX           63
 
 #include <linux/sched.h>
 #include <linux/slab.h>
index f02028e..78e5d9a 100644 (file)
@@ -955,7 +955,8 @@ static int ads7846_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume);
 
-static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads7846 *ts)
+static int __devinit ads7846_setup_pendown(struct spi_device *spi,
+                                          struct ads7846 *ts)
 {
        struct ads7846_platform_data *pdata = spi->dev.platform_data;
        int err;
@@ -981,6 +982,9 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
 
                ts->gpio_pendown = pdata->gpio_pendown;
 
+               if (pdata->gpio_pendown_debounce)
+                       gpio_set_debounce(pdata->gpio_pendown,
+                                         pdata->gpio_pendown_debounce);
        } else {
                dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
                return -EINVAL;
index d4a4cd4..0badfa4 100644 (file)
@@ -4108,7 +4108,7 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
 static int intel_iommu_add_device(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct pci_dev *bridge, *dma_pdev;
+       struct pci_dev *bridge, *dma_pdev = NULL;
        struct iommu_group *group;
        int ret;
 
@@ -4122,7 +4122,7 @@ static int intel_iommu_add_device(struct device *dev)
                        dma_pdev = pci_get_domain_bus_and_slot(
                                                pci_domain_nr(pdev->bus),
                                                bridge->subordinate->number, 0);
-               else
+               if (!dma_pdev)
                        dma_pdev = pci_dev_get(bridge);
        } else
                dma_pdev = pci_dev_get(pdev);
index a649f14..c0f7a42 100644 (file)
@@ -1054,6 +1054,7 @@ static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
                        stats[i], val, offs);
        }
        seq_printf(s, "\n");
+       dput(dent);
 
        return 0;
 }
index dc670cc..16c78f1 100644 (file)
@@ -168,7 +168,8 @@ static int __init armctrl_of_init(struct device_node *node,
 }
 
 static struct of_device_id irq_of_match[] __initconst = {
-       { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init }
+       { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init },
+       { }
 };
 
 void __init bcm2835_init_irq(void)
index a233ed5..86cd75a 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig ISDN
        bool "ISDN support"
-       depends on NET
+       depends on NET && NETDEVICES
        depends on !S390 && !UML
        ---help---
          ISDN ("Integrated Services Digital Network", called RNIS in France)
index 2302fbe..9c6650e 100644 (file)
@@ -6,7 +6,7 @@ if ISDN_I4L
 
 config ISDN_PPP
        bool "Support synchronous PPP"
-       depends on INET && NETDEVICES
+       depends on INET
        select SLHC
        help
          Over digital connections such as ISDN, there is no need to
index 8c610fa..e2a945e 100644 (file)
@@ -1312,7 +1312,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
                        } else
                                return -EINVAL;
                        break;
-#ifdef CONFIG_NETDEVICES
                case IIOCNETGPN:
                        /* Get peer phone number of a connected
                         * isdn network interface */
@@ -1322,7 +1321,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
                                return isdn_net_getpeer(&phone, argp);
                        } else
                                return -EINVAL;
-#endif
                default:
                        return -EINVAL;
                }
@@ -1352,7 +1350,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
                case IIOCNETLCR:
                        printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n");
                        return -ENODEV;
-#ifdef CONFIG_NETDEVICES
                case IIOCNETAIF:
                        /* Add a network-interface */
                        if (arg) {
@@ -1491,7 +1488,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
                                return -EFAULT;
                        return isdn_net_force_hangup(name);
                        break;
-#endif                          /* CONFIG_NETDEVICES */
                case IIOCSETVER:
                        dev->net_verbose = arg;
                        printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose);
index b312056..4239b39 100644 (file)
@@ -33,8 +33,6 @@
 struct led_trigger_cpu {
        char name[MAX_NAME_LEN];
        struct led_trigger *_trig;
-       struct mutex lock;
-       int lock_is_inited;
 };
 
 static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig);
@@ -50,12 +48,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt)
 {
        struct led_trigger_cpu *trig = &__get_cpu_var(cpu_trig);
 
-       /* mutex lock should be initialized before calling mutex_call() */
-       if (!trig->lock_is_inited)
-               return;
-
-       mutex_lock(&trig->lock);
-
        /* Locate the correct CPU LED */
        switch (ledevt) {
        case CPU_LED_IDLE_END:
@@ -75,8 +67,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt)
                /* Will leave the LED as it is */
                break;
        }
-
-       mutex_unlock(&trig->lock);
 }
 EXPORT_SYMBOL(ledtrig_cpu);
 
@@ -117,14 +107,9 @@ static int __init ledtrig_cpu_init(void)
        for_each_possible_cpu(cpu) {
                struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
 
-               mutex_init(&trig->lock);
-
                snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
 
-               mutex_lock(&trig->lock);
                led_trigger_register_simple(trig->name, &trig->_trig);
-               trig->lock_is_inited = 1;
-               mutex_unlock(&trig->lock);
        }
 
        register_syscore_ops(&ledtrig_cpu_syscore_ops);
@@ -142,15 +127,9 @@ static void __exit ledtrig_cpu_exit(void)
        for_each_possible_cpu(cpu) {
                struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
 
-               mutex_lock(&trig->lock);
-
                led_trigger_unregister_simple(trig->_trig);
                trig->_trig = NULL;
                memset(trig->name, 0, MAX_NAME_LEN);
-               trig->lock_is_inited = 0;
-
-               mutex_unlock(&trig->lock);
-               mutex_destroy(&trig->lock);
        }
 
        unregister_syscore_ops(&ledtrig_cpu_syscore_ops);
index 02db918..77e6eff 100644 (file)
@@ -740,8 +740,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue)
        if (!md_in_flight(md))
                wake_up(&md->wait);
 
+       /*
+        * Run this off this callpath, as drivers could invoke end_io while
+        * inside their request_fn (and holding the queue lock). Calling
+        * back into ->request_fn() could deadlock attempting to grab the
+        * queue lock again.
+        */
        if (run_queue)
-               blk_run_queue(md->queue);
+               blk_run_queue_async(md->queue);
 
        /*
         * dm_put() must be at the end of this function. See the comment above
index 9ab768a..6120071 100644 (file)
@@ -1817,10 +1817,10 @@ retry:
                        memset(bbp, 0xff, PAGE_SIZE);
 
                        for (i = 0 ; i < bb->count ; i++) {
-                               u64 internal_bb = *p++;
+                               u64 internal_bb = p[i];
                                u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
                                                | BB_LEN(internal_bb));
-                               *bbp++ = cpu_to_le64(store_bb);
+                               bbp[i] = cpu_to_le64(store_bb);
                        }
                        bb->changed = 0;
                        if (read_seqretry(&bb->lock, seq))
@@ -5294,7 +5294,7 @@ void md_stop_writes(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(md_stop_writes);
 
-void md_stop(struct mddev *mddev)
+static void __md_stop(struct mddev *mddev)
 {
        mddev->ready = 0;
        mddev->pers->stop(mddev);
@@ -5304,6 +5304,18 @@ void md_stop(struct mddev *mddev)
        mddev->pers = NULL;
        clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 }
+
+void md_stop(struct mddev *mddev)
+{
+       /* stop the array and free an attached data structures.
+        * This is called from dm-raid
+        */
+       __md_stop(mddev);
+       bitmap_destroy(mddev);
+       if (mddev->bio_set)
+               bioset_free(mddev->bio_set);
+}
+
 EXPORT_SYMBOL_GPL(md_stop);
 
 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
@@ -5364,7 +5376,7 @@ static int do_md_stop(struct mddev * mddev, int mode,
                        set_disk_ro(disk, 0);
 
                __md_stop_writes(mddev);
-               md_stop(mddev);
+               __md_stop(mddev);
                mddev->queue->merge_bvec_fn = NULL;
                mddev->queue->backing_dev_info.congested_fn = NULL;
 
@@ -7936,9 +7948,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
                   sector_t *first_bad, int *bad_sectors)
 {
        int hi;
-       int lo = 0;
+       int lo;
        u64 *p = bb->page;
-       int rv = 0;
+       int rv;
        sector_t target = s + sectors;
        unsigned seq;
 
@@ -7953,7 +7965,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
 
 retry:
        seq = read_seqbegin(&bb->lock);
-
+       lo = 0;
+       rv = 0;
        hi = bb->count;
 
        /* Binary search between lo and hi for 'target'
index d1295af..0d5d0ff 100644 (file)
@@ -499,7 +499,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
         */
        one_write_done(r10_bio);
        if (dec_rdev)
-               rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+               rdev_dec_pending(rdev, conf->mddev);
 }
 
 /*
@@ -1334,18 +1334,21 @@ retry_write:
                        blocked_rdev = rrdev;
                        break;
                }
+               if (rdev && (test_bit(Faulty, &rdev->flags)
+                            || test_bit(Unmerged, &rdev->flags)))
+                       rdev = NULL;
                if (rrdev && (test_bit(Faulty, &rrdev->flags)
                              || test_bit(Unmerged, &rrdev->flags)))
                        rrdev = NULL;
 
                r10_bio->devs[i].bio = NULL;
                r10_bio->devs[i].repl_bio = NULL;
-               if (!rdev || test_bit(Faulty, &rdev->flags) ||
-                   test_bit(Unmerged, &rdev->flags)) {
+
+               if (!rdev && !rrdev) {
                        set_bit(R10BIO_Degraded, &r10_bio->state);
                        continue;
                }
-               if (test_bit(WriteErrorSeen, &rdev->flags)) {
+               if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
                        sector_t first_bad;
                        sector_t dev_sector = r10_bio->devs[i].addr;
                        int bad_sectors;
@@ -1387,8 +1390,10 @@ retry_write:
                                        max_sectors = good_sectors;
                        }
                }
-               r10_bio->devs[i].bio = bio;
-               atomic_inc(&rdev->nr_pending);
+               if (rdev) {
+                       r10_bio->devs[i].bio = bio;
+                       atomic_inc(&rdev->nr_pending);
+               }
                if (rrdev) {
                        r10_bio->devs[i].repl_bio = bio;
                        atomic_inc(&rrdev->nr_pending);
@@ -1444,69 +1449,71 @@ retry_write:
        for (i = 0; i < conf->copies; i++) {
                struct bio *mbio;
                int d = r10_bio->devs[i].devnum;
-               if (!r10_bio->devs[i].bio)
-                       continue;
+               if (r10_bio->devs[i].bio) {
+                       struct md_rdev *rdev = conf->mirrors[d].rdev;
+                       mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+                       md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+                                   max_sectors);
+                       r10_bio->devs[i].bio = mbio;
+
+                       mbio->bi_sector = (r10_bio->devs[i].addr+
+                                          choose_data_offset(r10_bio,
+                                                             rdev));
+                       mbio->bi_bdev = rdev->bdev;
+                       mbio->bi_end_io = raid10_end_write_request;
+                       mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+                       mbio->bi_private = r10_bio;
 
-               mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
-                           max_sectors);
-               r10_bio->devs[i].bio = mbio;
+                       atomic_inc(&r10_bio->remaining);
 
-               mbio->bi_sector = (r10_bio->devs[i].addr+
-                                  choose_data_offset(r10_bio,
-                                                     conf->mirrors[d].rdev));
-               mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
-               mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
-               mbio->bi_private = r10_bio;
+                       cb = blk_check_plugged(raid10_unplug, mddev,
+                                              sizeof(*plug));
+                       if (cb)
+                               plug = container_of(cb, struct raid10_plug_cb,
+                                                   cb);
+                       else
+                               plug = NULL;
+                       spin_lock_irqsave(&conf->device_lock, flags);
+                       if (plug) {
+                               bio_list_add(&plug->pending, mbio);
+                               plug->pending_cnt++;
+                       } else {
+                               bio_list_add(&conf->pending_bio_list, mbio);
+                               conf->pending_count++;
+                       }
+                       spin_unlock_irqrestore(&conf->device_lock, flags);
+                       if (!plug)
+                               md_wakeup_thread(mddev->thread);
+               }
 
-               atomic_inc(&r10_bio->remaining);
+               if (r10_bio->devs[i].repl_bio) {
+                       struct md_rdev *rdev = conf->mirrors[d].replacement;
+                       if (rdev == NULL) {
+                               /* Replacement just got moved to main 'rdev' */
+                               smp_mb();
+                               rdev = conf->mirrors[d].rdev;
+                       }
+                       mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+                       md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+                                   max_sectors);
+                       r10_bio->devs[i].repl_bio = mbio;
+
+                       mbio->bi_sector = (r10_bio->devs[i].addr +
+                                          choose_data_offset(
+                                                  r10_bio, rdev));
+                       mbio->bi_bdev = rdev->bdev;
+                       mbio->bi_end_io = raid10_end_write_request;
+                       mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+                       mbio->bi_private = r10_bio;
 
-               cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
-               if (cb)
-                       plug = container_of(cb, struct raid10_plug_cb, cb);
-               else
-                       plug = NULL;
-               spin_lock_irqsave(&conf->device_lock, flags);
-               if (plug) {
-                       bio_list_add(&plug->pending, mbio);
-                       plug->pending_cnt++;
-               } else {
+                       atomic_inc(&r10_bio->remaining);
+                       spin_lock_irqsave(&conf->device_lock, flags);
                        bio_list_add(&conf->pending_bio_list, mbio);
                        conf->pending_count++;
+                       spin_unlock_irqrestore(&conf->device_lock, flags);
+                       if (!mddev_check_plugged(mddev))
+                               md_wakeup_thread(mddev->thread);
                }
-               spin_unlock_irqrestore(&conf->device_lock, flags);
-               if (!plug)
-                       md_wakeup_thread(mddev->thread);
-
-               if (!r10_bio->devs[i].repl_bio)
-                       continue;
-
-               mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
-                           max_sectors);
-               r10_bio->devs[i].repl_bio = mbio;
-
-               /* We are actively writing to the original device
-                * so it cannot disappear, so the replacement cannot
-                * become NULL here
-                */
-               mbio->bi_sector = (r10_bio->devs[i].addr +
-                                  choose_data_offset(
-                                          r10_bio,
-                                          conf->mirrors[d].replacement));
-               mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
-               mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
-               mbio->bi_private = r10_bio;
-
-               atomic_inc(&r10_bio->remaining);
-               spin_lock_irqsave(&conf->device_lock, flags);
-               bio_list_add(&conf->pending_bio_list, mbio);
-               conf->pending_count++;
-               spin_unlock_irqrestore(&conf->device_lock, flags);
-               if (!mddev_check_plugged(mddev))
-                       md_wakeup_thread(mddev->thread);
        }
 
        /* Don't remove the bias on 'remaining' (one_write_done) until
index c5439dc..a450268 100644 (file)
@@ -2774,10 +2774,12 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                        dev = &sh->dev[i];
                        if (!test_bit(R5_LOCKED, &dev->flags) &&
                            (test_bit(R5_UPTODATE, &dev->flags) ||
-                            test_and_clear_bit(R5_Discard, &dev->flags))) {
+                            test_bit(R5_Discard, &dev->flags))) {
                                /* We can return any write requests */
                                struct bio *wbi, *wbi2;
                                pr_debug("Return write for disc %d\n", i);
+                               if (test_and_clear_bit(R5_Discard, &dev->flags))
+                                       clear_bit(R5_UPTODATE, &dev->flags);
                                wbi = dev->written;
                                dev->written = NULL;
                                while (wbi && wbi->bi_sector <
@@ -2795,7 +2797,8 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                                         !test_bit(STRIPE_DEGRADED, &sh->state),
                                                0);
                        }
-               }
+               } else if (test_bit(R5_Discard, &sh->dev[i].flags))
+                       clear_bit(R5_Discard, &sh->dev[i].flags);
 
        if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
                if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3490,40 +3493,6 @@ static void handle_stripe(struct stripe_head *sh)
                        handle_failed_sync(conf, sh, &s);
        }
 
-       /*
-        * might be able to return some write requests if the parity blocks
-        * are safe, or on a failed drive
-        */
-       pdev = &sh->dev[sh->pd_idx];
-       s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
-               || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
-       qdev = &sh->dev[sh->qd_idx];
-       s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
-               || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
-               || conf->level < 6;
-
-       if (s.written &&
-           (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
-                            && !test_bit(R5_LOCKED, &pdev->flags)
-                            && (test_bit(R5_UPTODATE, &pdev->flags) ||
-                                test_bit(R5_Discard, &pdev->flags))))) &&
-           (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
-                            && !test_bit(R5_LOCKED, &qdev->flags)
-                            && (test_bit(R5_UPTODATE, &qdev->flags) ||
-                                test_bit(R5_Discard, &qdev->flags))))))
-               handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
-
-       /* Now we might consider reading some blocks, either to check/generate
-        * parity, or to satisfy requests
-        * or to load a block that is being partially written.
-        */
-       if (s.to_read || s.non_overwrite
-           || (conf->level == 6 && s.to_write && s.failed)
-           || (s.syncing && (s.uptodate + s.compute < disks))
-           || s.replacing
-           || s.expanding)
-               handle_stripe_fill(sh, &s, disks);
-
        /* Now we check to see if any write operations have recently
         * completed
         */
@@ -3561,6 +3530,40 @@ static void handle_stripe(struct stripe_head *sh)
                        s.dec_preread_active = 1;
        }
 
+       /*
+        * might be able to return some write requests if the parity blocks
+        * are safe, or on a failed drive
+        */
+       pdev = &sh->dev[sh->pd_idx];
+       s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
+               || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
+       qdev = &sh->dev[sh->qd_idx];
+       s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
+               || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
+               || conf->level < 6;
+
+       if (s.written &&
+           (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
+                            && !test_bit(R5_LOCKED, &pdev->flags)
+                            && (test_bit(R5_UPTODATE, &pdev->flags) ||
+                                test_bit(R5_Discard, &pdev->flags))))) &&
+           (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
+                            && !test_bit(R5_LOCKED, &qdev->flags)
+                            && (test_bit(R5_UPTODATE, &qdev->flags) ||
+                                test_bit(R5_Discard, &qdev->flags))))))
+               handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
+
+       /* Now we might consider reading some blocks, either to check/generate
+        * parity, or to satisfy requests
+        * or to load a block that is being partially written.
+        */
+       if (s.to_read || s.non_overwrite
+           || (conf->level == 6 && s.to_write && s.failed)
+           || (s.syncing && (s.uptodate + s.compute < disks))
+           || s.replacing
+           || s.expanding)
+               handle_stripe_fill(sh, &s, disks);
+
        /* Now to consider new write requests and what else, if anything
         * should be read.  We do not handle new writes when:
         * 1/ A 'write' operation (copy+xor) is already in flight.
@@ -5529,6 +5532,10 @@ static int run(struct mddev *mddev)
                 * discard data disk but write parity disk
                 */
                stripe = stripe * PAGE_SIZE;
+               /* Round up to power of 2, as discard handling
+                * currently assumes that */
+               while ((stripe-1) & stripe)
+                       stripe = (stripe | (stripe-1)) + 1;
                mddev->queue->limits.discard_alignment = stripe;
                mddev->queue->limits.discard_granularity = stripe;
                /*
index 660bbc5..4d50da6 100644 (file)
@@ -208,7 +208,7 @@ static unsigned long exynos5250_dwmmc_caps[4] = {
        MMC_CAP_CMD23,
 };
 
-static struct dw_mci_drv_data exynos5250_drv_data = {
+static const struct dw_mci_drv_data exynos5250_drv_data = {
        .caps                   = exynos5250_dwmmc_caps,
        .init                   = dw_mci_exynos_priv_init,
        .setup_clock            = dw_mci_exynos_setup_clock,
@@ -220,14 +220,14 @@ static struct dw_mci_drv_data exynos5250_drv_data = {
 
 static const struct of_device_id dw_mci_exynos_match[] = {
        { .compatible = "samsung,exynos5250-dw-mshc",
-                       .data = (void *)&exynos5250_drv_data, },
+                       .data = &exynos5250_drv_data, },
        {},
 };
-MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
+MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
 
 int dw_mci_exynos_probe(struct platform_device *pdev)
 {
-       struct dw_mci_drv_data *drv_data;
+       const struct dw_mci_drv_data *drv_data;
        const struct of_device_id *match;
 
        match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
index c960ca7..917936b 100644 (file)
@@ -24,7 +24,7 @@
 #include "dw_mmc.h"
 
 int dw_mci_pltfm_register(struct platform_device *pdev,
-                               struct dw_mci_drv_data *drv_data)
+                               const struct dw_mci_drv_data *drv_data)
 {
        struct dw_mci *host;
        struct resource *regs;
@@ -50,8 +50,8 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
        if (!host->regs)
                return -ENOMEM;
 
-       if (host->drv_data->init) {
-               ret = host->drv_data->init(host);
+       if (drv_data && drv_data->init) {
+               ret = drv_data->init(host);
                if (ret)
                        return ret;
        }
index 301f245..2ac37b8 100644 (file)
@@ -13,7 +13,7 @@
 #define _DW_MMC_PLTFM_H_
 
 extern int dw_mci_pltfm_register(struct platform_device *pdev,
-                               struct dw_mci_drv_data *drv_data);
+                               const struct dw_mci_drv_data *drv_data);
 extern int __devexit dw_mci_pltfm_remove(struct platform_device *pdev);
 extern const struct dev_pm_ops dw_mci_pltfm_pmops;
 
index c2828f3..c0667c8 100644 (file)
@@ -232,6 +232,7 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
 {
        struct mmc_data *data;
        struct dw_mci_slot *slot = mmc_priv(mmc);
+       struct dw_mci_drv_data *drv_data = slot->host->drv_data;
        u32 cmdr;
        cmd->error = -EINPROGRESS;
 
@@ -261,8 +262,8 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
                        cmdr |= SDMMC_CMD_DAT_WR;
        }
 
-       if (slot->host->drv_data->prepare_command)
-               slot->host->drv_data->prepare_command(slot->host, &cmdr);
+       if (drv_data && drv_data->prepare_command)
+               drv_data->prepare_command(slot->host, &cmdr);
 
        return cmdr;
 }
@@ -434,7 +435,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
        return 0;
 }
 
-static struct dw_mci_dma_ops dw_mci_idmac_ops = {
+static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
        .init = dw_mci_idmac_init,
        .start = dw_mci_idmac_start_dma,
        .stop = dw_mci_idmac_stop_dma,
@@ -772,6 +773,7 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
        struct dw_mci_slot *slot = mmc_priv(mmc);
+       struct dw_mci_drv_data *drv_data = slot->host->drv_data;
        u32 regs;
 
        /* set default 1 bit mode */
@@ -807,8 +809,8 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                slot->clock = ios->clock;
        }
 
-       if (slot->host->drv_data->set_ios)
-               slot->host->drv_data->set_ios(slot->host, ios);
+       if (drv_data && drv_data->set_ios)
+               drv_data->set_ios(slot->host, ios);
 
        switch (ios->power_mode) {
        case MMC_POWER_UP:
@@ -1815,6 +1817,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
 {
        struct mmc_host *mmc;
        struct dw_mci_slot *slot;
+       struct dw_mci_drv_data *drv_data = host->drv_data;
        int ctrl_id, ret;
        u8 bus_width;
 
@@ -1854,8 +1857,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
        } else {
                ctrl_id = to_platform_device(host->dev)->id;
        }
-       if (host->drv_data && host->drv_data->caps)
-               mmc->caps |= host->drv_data->caps[ctrl_id];
+       if (drv_data && drv_data->caps)
+               mmc->caps |= drv_data->caps[ctrl_id];
 
        if (host->pdata->caps2)
                mmc->caps2 = host->pdata->caps2;
@@ -1867,10 +1870,10 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
        else
                bus_width = 1;
 
-       if (host->drv_data->setup_bus) {
+       if (drv_data && drv_data->setup_bus) {
                struct device_node *slot_np;
                slot_np = dw_mci_of_find_slot_node(host->dev, slot->id);
-               ret = host->drv_data->setup_bus(host, slot_np, bus_width);
+               ret = drv_data->setup_bus(host, slot_np, bus_width);
                if (ret)
                        goto err_setup_bus;
        }
@@ -1968,7 +1971,7 @@ static void dw_mci_init_dma(struct dw_mci *host)
        /* Determine which DMA interface to use */
 #ifdef CONFIG_MMC_DW_IDMAC
        host->dma_ops = &dw_mci_idmac_ops;
-       dev_info(&host->dev, "Using internal DMA controller.\n");
+       dev_info(host->dev, "Using internal DMA controller.\n");
 #endif
 
        if (!host->dma_ops)
@@ -2035,6 +2038,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
        struct dw_mci_board *pdata;
        struct device *dev = host->dev;
        struct device_node *np = dev->of_node;
+       struct dw_mci_drv_data *drv_data = host->drv_data;
        int idx, ret;
 
        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
@@ -2062,8 +2066,8 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
 
        of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
 
-       if (host->drv_data->parse_dt) {
-               ret = host->drv_data->parse_dt(host);
+       if (drv_data && drv_data->parse_dt) {
+               ret = drv_data->parse_dt(host);
                if (ret)
                        return ERR_PTR(ret);
        }
@@ -2080,6 +2084,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
 
 int dw_mci_probe(struct dw_mci *host)
 {
+       struct dw_mci_drv_data *drv_data = host->drv_data;
        int width, i, ret = 0;
        u32 fifo_size;
        int init_slots = 0;
@@ -2127,8 +2132,8 @@ int dw_mci_probe(struct dw_mci *host)
        else
                host->bus_hz = clk_get_rate(host->ciu_clk);
 
-       if (host->drv_data->setup_clock) {
-               ret = host->drv_data->setup_clock(host);
+       if (drv_data && drv_data->setup_clock) {
+               ret = drv_data->setup_clock(host);
                if (ret) {
                        dev_err(host->dev,
                                "implementation specific clock setup failed\n");
@@ -2228,6 +2233,21 @@ int dw_mci_probe(struct dw_mci *host)
        else
                host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
 
+       /*
+        * Enable interrupts for command done, data over, data empty, card det,
+        * receive ready and error such as transmit, receive timeout, crc error
+        */
+       mci_writel(host, RINTSTS, 0xFFFFFFFF);
+       mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+                  SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+                  DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+       mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+
+       dev_info(host->dev, "DW MMC controller at irq %d, "
+                "%d bit host data width, "
+                "%u deep fifo\n",
+                host->irq, width, fifo_size);
+
        /* We need at least one slot to succeed */
        for (i = 0; i < host->num_slots; i++) {
                ret = dw_mci_init_slot(host, i);
@@ -2257,20 +2277,6 @@ int dw_mci_probe(struct dw_mci *host)
        else
                host->data_offset = DATA_240A_OFFSET;
 
-       /*
-        * Enable interrupts for command done, data over, data empty, card det,
-        * receive ready and error such as transmit, receive timeout, crc error
-        */
-       mci_writel(host, RINTSTS, 0xFFFFFFFF);
-       mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
-                  SDMMC_INT_TXDR | SDMMC_INT_RXDR |
-                  DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
-       mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
-
-       dev_info(host->dev, "DW MMC controller at irq %d, "
-                "%d bit host data width, "
-                "%u deep fifo\n",
-                host->irq, width, fifo_size);
        if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
                dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
 
index 565c2e4..6290b7f 100644 (file)
@@ -1134,4 +1134,4 @@ module_platform_driver(mxcmci_driver);
 MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
 MODULE_AUTHOR("Sascha Hauer, Pengutronix");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-mmc");
+MODULE_ALIAS("platform:mxc-mmc");
index 54bfd0c..fedd258 100644 (file)
@@ -178,7 +178,8 @@ struct omap_hsmmc_host {
 
 static int omap_hsmmc_card_detect(struct device *dev, int slot)
 {
-       struct omap_mmc_platform_data *mmc = dev->platform_data;
+       struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+       struct omap_mmc_platform_data *mmc = host->pdata;
 
        /* NOTE: assumes card detect signal is active-low */
        return !gpio_get_value_cansleep(mmc->slots[0].switch_pin);
@@ -186,7 +187,8 @@ static int omap_hsmmc_card_detect(struct device *dev, int slot)
 
 static int omap_hsmmc_get_wp(struct device *dev, int slot)
 {
-       struct omap_mmc_platform_data *mmc = dev->platform_data;
+       struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+       struct omap_mmc_platform_data *mmc = host->pdata;
 
        /* NOTE: assumes write protect signal is active-high */
        return gpio_get_value_cansleep(mmc->slots[0].gpio_wp);
@@ -194,7 +196,8 @@ static int omap_hsmmc_get_wp(struct device *dev, int slot)
 
 static int omap_hsmmc_get_cover_state(struct device *dev, int slot)
 {
-       struct omap_mmc_platform_data *mmc = dev->platform_data;
+       struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+       struct omap_mmc_platform_data *mmc = host->pdata;
 
        /* NOTE: assumes card detect signal is active-low */
        return !gpio_get_value_cansleep(mmc->slots[0].switch_pin);
@@ -204,7 +207,8 @@ static int omap_hsmmc_get_cover_state(struct device *dev, int slot)
 
 static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot)
 {
-       struct omap_mmc_platform_data *mmc = dev->platform_data;
+       struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+       struct omap_mmc_platform_data *mmc = host->pdata;
 
        disable_irq(mmc->slots[0].card_detect_irq);
        return 0;
@@ -212,7 +216,8 @@ static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot)
 
 static int omap_hsmmc_resume_cdirq(struct device *dev, int slot)
 {
-       struct omap_mmc_platform_data *mmc = dev->platform_data;
+       struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+       struct omap_mmc_platform_data *mmc = host->pdata;
 
        enable_irq(mmc->slots[0].card_detect_irq);
        return 0;
@@ -2009,9 +2014,9 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
                clk_put(host->dbclk);
        }
 
-       mmc_free_host(host->mmc);
+       omap_hsmmc_gpio_free(host->pdata);
        iounmap(host->base);
-       omap_hsmmc_gpio_free(pdev->dev.platform_data);
+       mmc_free_host(host->mmc);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res)
index 90140eb..8fd50a2 100644 (file)
@@ -19,6 +19,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/err.h>
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -84,30 +85,32 @@ static int __devinit sdhci_dove_probe(struct platform_device *pdev)
        struct sdhci_dove_priv *priv;
        int ret;
 
-       ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
-       if (ret)
-               goto sdhci_dove_register_fail;
-
        priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv),
                            GFP_KERNEL);
        if (!priv) {
                dev_err(&pdev->dev, "unable to allocate private data");
-               ret = -ENOMEM;
-               goto sdhci_dove_allocate_fail;
+               return -ENOMEM;
        }
 
+       priv->clk = clk_get(&pdev->dev, NULL);
+       if (!IS_ERR(priv->clk))
+               clk_prepare_enable(priv->clk);
+
+       ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
+       if (ret)
+               goto sdhci_dove_register_fail;
+
        host = platform_get_drvdata(pdev);
        pltfm_host = sdhci_priv(host);
        pltfm_host->priv = priv;
 
-       priv->clk = clk_get(&pdev->dev, NULL);
-       if (!IS_ERR(priv->clk))
-               clk_prepare_enable(priv->clk);
        return 0;
 
-sdhci_dove_allocate_fail:
-       sdhci_pltfm_unregister(pdev);
 sdhci_dove_register_fail:
+       if (!IS_ERR(priv->clk)) {
+               clk_disable_unprepare(priv->clk);
+               clk_put(priv->clk);
+       }
        return ret;
 }
 
@@ -117,14 +120,13 @@ static int __devexit sdhci_dove_remove(struct platform_device *pdev)
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_dove_priv *priv = pltfm_host->priv;
 
-       if (priv->clk) {
-               if (!IS_ERR(priv->clk)) {
-                       clk_disable_unprepare(priv->clk);
-                       clk_put(priv->clk);
-               }
-               devm_kfree(&pdev->dev, priv->clk);
+       sdhci_pltfm_unregister(pdev);
+
+       if (!IS_ERR(priv->clk)) {
+               clk_disable_unprepare(priv->clk);
+               clk_put(priv->clk);
        }
-       return sdhci_pltfm_unregister(pdev);
+       return 0;
 }
 
 static const struct of_device_id sdhci_dove_of_match_table[] __devinitdata = {
index ae5fcbf..63d219f 100644 (file)
@@ -169,6 +169,16 @@ static void esdhc_of_resume(struct sdhci_host *host)
 }
 #endif
 
+static void esdhc_of_platform_init(struct sdhci_host *host)
+{
+       u32 vvn;
+
+       vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
+       vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
+       if (vvn == VENDOR_V_22)
+               host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
+}
+
 static struct sdhci_ops sdhci_esdhc_ops = {
        .read_l = esdhc_readl,
        .read_w = esdhc_readw,
@@ -180,6 +190,7 @@ static struct sdhci_ops sdhci_esdhc_ops = {
        .enable_dma = esdhc_of_enable_dma,
        .get_max_clock = esdhc_of_get_max_clock,
        .get_min_clock = esdhc_of_get_min_clock,
+       .platform_init = esdhc_of_platform_init,
 #ifdef CONFIG_PM
        .platform_suspend = esdhc_of_suspend,
        .platform_resume = esdhc_of_resume,
index 4bb74b0..04936f3 100644 (file)
@@ -1196,7 +1196,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
                return ERR_PTR(-ENODEV);
        }
 
-       if (pci_resource_len(pdev, bar) != 0x100) {
+       if (pci_resource_len(pdev, bar) < 0x100) {
                dev_err(&pdev->dev, "Invalid iomem size. You may "
                        "experience problems.\n");
        }
index 65551a9..2716445 100644 (file)
@@ -150,6 +150,13 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
                goto err_remap;
        }
 
+       /*
+        * Some platforms need to probe the controller to be able to
+        * determine which caps should be used.
+        */
+       if (host->ops && host->ops->platform_init)
+               host->ops->platform_init(host);
+
        platform_set_drvdata(pdev, host);
 
        return host;
index 2903949..a54dd5d 100644 (file)
@@ -211,8 +211,8 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
        if (ourhost->cur_clk != best_src) {
                struct clk *clk = ourhost->clk_bus[best_src];
 
-               clk_enable(clk);
-               clk_disable(ourhost->clk_bus[ourhost->cur_clk]);
+               clk_prepare_enable(clk);
+               clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
 
                /* turn clock off to card before changing clock source */
                writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
@@ -607,7 +607,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
        }
 
        /* enable the local io clock and keep it running for the moment. */
-       clk_enable(sc->clk_io);
+       clk_prepare_enable(sc->clk_io);
 
        for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
                struct clk *clk;
@@ -638,7 +638,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
        }
 
 #ifndef CONFIG_PM_RUNTIME
-       clk_enable(sc->clk_bus[sc->cur_clk]);
+       clk_prepare_enable(sc->clk_bus[sc->cur_clk]);
 #endif
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -747,13 +747,14 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
                sdhci_s3c_setup_card_detect_gpio(sc);
 
 #ifdef CONFIG_PM_RUNTIME
-       clk_disable(sc->clk_io);
+       if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
+               clk_disable_unprepare(sc->clk_io);
 #endif
        return 0;
 
  err_req_regs:
 #ifndef CONFIG_PM_RUNTIME
-       clk_disable(sc->clk_bus[sc->cur_clk]);
+       clk_disable_unprepare(sc->clk_bus[sc->cur_clk]);
 #endif
        for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
                if (sc->clk_bus[ptr]) {
@@ -762,7 +763,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
        }
 
  err_no_busclks:
-       clk_disable(sc->clk_io);
+       clk_disable_unprepare(sc->clk_io);
        clk_put(sc->clk_io);
 
  err_io_clk:
@@ -794,7 +795,8 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
                gpio_free(sc->ext_cd_gpio);
 
 #ifdef CONFIG_PM_RUNTIME
-       clk_enable(sc->clk_io);
+       if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
+               clk_prepare_enable(sc->clk_io);
 #endif
        sdhci_remove_host(host, 1);
 
@@ -802,14 +804,14 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 
 #ifndef CONFIG_PM_RUNTIME
-       clk_disable(sc->clk_bus[sc->cur_clk]);
+       clk_disable_unprepare(sc->clk_bus[sc->cur_clk]);
 #endif
        for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
                if (sc->clk_bus[ptr]) {
                        clk_put(sc->clk_bus[ptr]);
                }
        }
-       clk_disable(sc->clk_io);
+       clk_disable_unprepare(sc->clk_io);
        clk_put(sc->clk_io);
 
        if (pdev->dev.of_node) {
@@ -849,8 +851,8 @@ static int sdhci_s3c_runtime_suspend(struct device *dev)
 
        ret = sdhci_runtime_suspend_host(host);
 
-       clk_disable(ourhost->clk_bus[ourhost->cur_clk]);
-       clk_disable(busclk);
+       clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
+       clk_disable_unprepare(busclk);
        return ret;
 }
 
@@ -861,8 +863,8 @@ static int sdhci_s3c_runtime_resume(struct device *dev)
        struct clk *busclk = ourhost->clk_io;
        int ret;
 
-       clk_enable(busclk);
-       clk_enable(ourhost->clk_bus[ourhost->cur_clk]);
+       clk_prepare_enable(busclk);
+       clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
        ret = sdhci_runtime_resume_host(host);
        return ret;
 }
index 7922adb..c7851c0 100644 (file)
@@ -1315,16 +1315,19 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
                 */
                if ((host->flags & SDHCI_NEEDS_RETUNING) &&
                    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
-                       /* eMMC uses cmd21 while sd and sdio use cmd19 */
-                       tuning_opcode = mmc->card->type == MMC_TYPE_MMC ?
-                               MMC_SEND_TUNING_BLOCK_HS200 :
-                               MMC_SEND_TUNING_BLOCK;
-                       spin_unlock_irqrestore(&host->lock, flags);
-                       sdhci_execute_tuning(mmc, tuning_opcode);
-                       spin_lock_irqsave(&host->lock, flags);
-
-                       /* Restore original mmc_request structure */
-                       host->mrq = mrq;
+                       if (mmc->card) {
+                               /* eMMC uses cmd21 but sd and sdio use cmd19 */
+                               tuning_opcode =
+                                       mmc->card->type == MMC_TYPE_MMC ?
+                                       MMC_SEND_TUNING_BLOCK_HS200 :
+                                       MMC_SEND_TUNING_BLOCK;
+                               spin_unlock_irqrestore(&host->lock, flags);
+                               sdhci_execute_tuning(mmc, tuning_opcode);
+                               spin_lock_irqsave(&host->lock, flags);
+
+                               /* Restore original mmc_request structure */
+                               host->mrq = mrq;
+                       }
                }
 
                if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
@@ -2837,6 +2840,9 @@ int sdhci_add_host(struct sdhci_host *host)
        if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
                mmc->caps |= MMC_CAP_4_BIT_DATA;
 
+       if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
+               mmc->caps &= ~MMC_CAP_CMD23;
+
        if (caps[0] & SDHCI_CAN_DO_HISPD)
                mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
 
@@ -2846,9 +2852,12 @@ int sdhci_add_host(struct sdhci_host *host)
 
        /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
        host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc");
-       if (IS_ERR(host->vqmmc)) {
-               pr_info("%s: no vqmmc regulator found\n", mmc_hostname(mmc));
-               host->vqmmc = NULL;
+       if (IS_ERR_OR_NULL(host->vqmmc)) {
+               if (PTR_ERR(host->vqmmc) < 0) {
+                       pr_info("%s: no vqmmc regulator found\n",
+                               mmc_hostname(mmc));
+                       host->vqmmc = NULL;
+               }
        }
        else if (regulator_is_supported_voltage(host->vqmmc, 1800000, 1800000))
                regulator_enable(host->vqmmc);
@@ -2904,9 +2913,12 @@ int sdhci_add_host(struct sdhci_host *host)
        ocr_avail = 0;
 
        host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
-       if (IS_ERR(host->vmmc)) {
-               pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
-               host->vmmc = NULL;
+       if (IS_ERR_OR_NULL(host->vmmc)) {
+               if (PTR_ERR(host->vmmc) < 0) {
+                       pr_info("%s: no vmmc regulator found\n",
+                               mmc_hostname(mmc));
+                       host->vmmc = NULL;
+               }
        } else
                regulator_enable(host->vmmc);
 
index 97653ea..71a4a7e 100644 (file)
@@ -278,6 +278,7 @@ struct sdhci_ops {
        void    (*hw_reset)(struct sdhci_host *host);
        void    (*platform_suspend)(struct sdhci_host *host);
        void    (*platform_resume)(struct sdhci_host *host);
+       void    (*platform_init)(struct sdhci_host *host);
 };
 
 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
index 11d2bc3..d25bc97 100644 (file)
@@ -1466,9 +1466,9 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, NULL);
 
+       clk_disable(host->hclk);
        mmc_free_host(host->mmc);
        pm_runtime_put_sync(&pdev->dev);
-       clk_disable(host->hclk);
        pm_runtime_disable(&pdev->dev);
 
        return 0;
index 8f52fc8..5a5cd2a 100644 (file)
@@ -240,7 +240,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength)
 
        if (*(szlength) != '+') {
                devlength = simple_strtoul(szlength, &buffer, 0);
-               devlength = handle_unit(devlength, buffer) - devstart;
+               devlength = handle_unit(devlength, buffer);
                if (devlength < devstart)
                        goto err_out;
 
index ec6841d..1a03b7f 100644 (file)
@@ -2983,13 +2983,15 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
        /*
         * Field definitions are in the following datasheets:
         * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
-        * New style   (6 byte ID): Samsung K9GAG08U0F (p.44)
+        * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
         * Hynix MLC   (6 byte ID): Hynix H27UBG8T2B (p.22)
         *
-        * Check for ID length, cell type, and Hynix/Samsung ID to decide what
-        * to do.
+        * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
+        * ID to decide what to do.
         */
-       if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG) {
+       if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
+                       (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+                       id_data[5] != 0x00) {
                /* Calc pagesize */
                mtd->writesize = 2048 << (extid & 0x03);
                extid >>= 2;
index 64be8f0..d9127e2 100644 (file)
@@ -121,7 +121,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master,
        nr_parts = plen / sizeof(part[0]);
 
        *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
-       if (!pparts)
+       if (!*pparts)
                return -ENOMEM;
 
        names = of_get_property(dp, "partition-names", &plen);
index 7153e0d..b3f41f2 100644 (file)
@@ -3694,7 +3694,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
  * flexonenand_set_boundary    - Writes the SLC boundary
  * @param mtd                  - mtd info structure
  */
-int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
                                    int boundary, int lock)
 {
        struct onenand_chip *this = mtd->priv;
index b2530b0..5f5b69f 100644 (file)
@@ -1379,6 +1379,8 @@ static void bond_compute_features(struct bonding *bond)
        struct net_device *bond_dev = bond->dev;
        netdev_features_t vlan_features = BOND_VLAN_FEATURES;
        unsigned short max_hard_header_len = ETH_HLEN;
+       unsigned int gso_max_size = GSO_MAX_SIZE;
+       u16 gso_max_segs = GSO_MAX_SEGS;
        int i;
        unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
 
@@ -1394,11 +1396,16 @@ static void bond_compute_features(struct bonding *bond)
                dst_release_flag &= slave->dev->priv_flags;
                if (slave->dev->hard_header_len > max_hard_header_len)
                        max_hard_header_len = slave->dev->hard_header_len;
+
+               gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
+               gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
        }
 
 done:
        bond_dev->vlan_features = vlan_features;
        bond_dev->hard_header_len = max_hard_header_len;
+       bond_dev->gso_max_segs = gso_max_segs;
+       netif_set_gso_max_size(bond_dev, gso_max_size);
 
        flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
        bond_dev->priv_flags = flags | dst_release_flag;
index d04911d..47618e5 100644 (file)
@@ -813,6 +813,7 @@ static int __init ne_drv_probe(struct platform_device *pdev)
                dev->irq = irq[this_dev];
                dev->mem_end = bad[this_dev];
        }
+       SET_NETDEV_DEV(dev, &pdev->dev);
        err = do_ne_probe(dev);
        if (err) {
                free_netdev(dev);
index c65295d..6e5bdd1 100644 (file)
@@ -1702,7 +1702,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
                                      SHMEM_EEE_ADV_STATUS_SHIFT);
        if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
                DP(BNX2X_MSG_ETHTOOL,
-                  "Direct manipulation of EEE advertisment is not supported\n");
+                  "Direct manipulation of EEE advertisement is not supported\n");
                return -EINVAL;
        }
 
index 6dd0dd0..f6cfdc6 100644 (file)
@@ -9941,7 +9941,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                else
                        rc = bnx2x_8483x_disable_eee(phy, params, vars);
                if (rc) {
-                       DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
+                       DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
                        return rc;
                }
        } else {
@@ -12987,7 +12987,7 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
                DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
                break;
        default:
-               DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
+               DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n");
        }
        DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
           old_status, status);
index bd1fd3d..01611b3 100644 (file)
@@ -9545,10 +9545,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
  */
 static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
-       if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
-               BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
-               REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+       if (!CHIP_IS_E1x(bp)) {
+               u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+               if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+                       BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+                       REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+                              1 << BP_FUNC(bp));
+               }
        }
 }
 
index 32eec15..730ae2c 100644 (file)
@@ -2519,6 +2519,7 @@ int t4_fw_bye(struct adapter *adap, unsigned int mbox)
 {
        struct fw_bye_cmd c;
 
+       memset(&c, 0, sizeof(c));
        INIT_CMD(c, BYE, WRITE);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -2535,6 +2536,7 @@ int t4_early_init(struct adapter *adap, unsigned int mbox)
 {
        struct fw_initialize_cmd c;
 
+       memset(&c, 0, sizeof(c));
        INIT_CMD(c, INITIALIZE, WRITE);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -2551,6 +2553,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
 {
        struct fw_reset_cmd c;
 
+       memset(&c, 0, sizeof(c));
        INIT_CMD(c, RESET, WRITE);
        c.val = htonl(reset);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
@@ -2828,7 +2831,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
                     HOSTPAGESIZEPF7(sge_hps));
 
        t4_set_reg_field(adap, SGE_CONTROL,
-                        INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
+                        INGPADBOUNDARY_MASK |
                         EGRSTATUSPAGESIZE_MASK,
                         INGPADBOUNDARY(fl_align_log - 5) |
                         EGRSTATUSPAGESIZE(stat_len != 64));
@@ -3278,6 +3281,7 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
 {
        struct fw_vi_enable_cmd c;
 
+       memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
                             FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
        c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
index 1d03dcd..19ac096 100644 (file)
@@ -1353,8 +1353,11 @@ static int gfar_restore(struct device *dev)
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;
 
-       if (!netif_running(ndev))
+       if (!netif_running(ndev)) {
+               netif_device_attach(ndev);
+
                return 0;
+       }
 
        gfar_init_bds(ndev);
        init_registers(ndev);
index f8064df..60ac46f 100644 (file)
@@ -1860,10 +1860,14 @@ jme_open(struct net_device *netdev)
        jme_clear_pm(jme);
        JME_NAPI_ENABLE(jme);
 
-       tasklet_enable(&jme->linkch_task);
-       tasklet_enable(&jme->txclean_task);
-       tasklet_hi_enable(&jme->rxclean_task);
-       tasklet_hi_enable(&jme->rxempty_task);
+       tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
+                    (unsigned long) jme);
+       tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet,
+                    (unsigned long) jme);
+       tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet,
+                    (unsigned long) jme);
+       tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet,
+                    (unsigned long) jme);
 
        rc = jme_request_irq(jme);
        if (rc)
@@ -1948,10 +1952,10 @@ jme_close(struct net_device *netdev)
 
        JME_NAPI_DISABLE(jme);
 
-       tasklet_disable(&jme->linkch_task);
-       tasklet_disable(&jme->txclean_task);
-       tasklet_disable(&jme->rxclean_task);
-       tasklet_disable(&jme->rxempty_task);
+       tasklet_kill(&jme->linkch_task);
+       tasklet_kill(&jme->txclean_task);
+       tasklet_kill(&jme->rxclean_task);
+       tasklet_kill(&jme->rxempty_task);
 
        jme_disable_rx_engine(jme);
        jme_disable_tx_engine(jme);
@@ -3079,22 +3083,6 @@ jme_init_one(struct pci_dev *pdev,
        tasklet_init(&jme->pcc_task,
                     jme_pcc_tasklet,
                     (unsigned long) jme);
-       tasklet_init(&jme->linkch_task,
-                    jme_link_change_tasklet,
-                    (unsigned long) jme);
-       tasklet_init(&jme->txclean_task,
-                    jme_tx_clean_tasklet,
-                    (unsigned long) jme);
-       tasklet_init(&jme->rxclean_task,
-                    jme_rx_clean_tasklet,
-                    (unsigned long) jme);
-       tasklet_init(&jme->rxempty_task,
-                    jme_rx_empty_tasklet,
-                    (unsigned long) jme);
-       tasklet_disable_nosync(&jme->linkch_task);
-       tasklet_disable_nosync(&jme->txclean_task);
-       tasklet_disable_nosync(&jme->rxclean_task);
-       tasklet_disable_nosync(&jme->rxempty_task);
        jme->dpi.cur = PCC_P1;
 
        jme->reg_ghc = 0;
index 9b9c2ac..d19a143 100644 (file)
@@ -4026,7 +4026,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
        dev0 = hw->dev[0];
        unregister_netdev(dev0);
 
-       tasklet_disable(&hw->phy_task);
+       tasklet_kill(&hw->phy_task);
 
        spin_lock_irq(&hw->hw_lock);
        hw->intr_mask = 0;
index 318fee9..69e0197 100644 (file)
@@ -5407,8 +5407,8 @@ static int netdev_close(struct net_device *dev)
                /* Delay for receive task to stop scheduling itself. */
                msleep(2000 / HZ);
 
-               tasklet_disable(&hw_priv->rx_tasklet);
-               tasklet_disable(&hw_priv->tx_tasklet);
+               tasklet_kill(&hw_priv->rx_tasklet);
+               tasklet_kill(&hw_priv->tx_tasklet);
                free_irq(dev->irq, hw_priv->dev);
 
                transmit_cleanup(hw_priv, 0);
@@ -5459,8 +5459,10 @@ static int prepare_hardware(struct net_device *dev)
        rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
        if (rc)
                return rc;
-       tasklet_enable(&hw_priv->rx_tasklet);
-       tasklet_enable(&hw_priv->tx_tasklet);
+       tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
+                    (unsigned long) hw_priv);
+       tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
+                    (unsigned long) hw_priv);
 
        hw->promiscuous = 0;
        hw->all_multi = 0;
@@ -7033,16 +7035,6 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
        spin_lock_init(&hw_priv->hwlock);
        mutex_init(&hw_priv->lock);
 
-       /* tasklet is enabled. */
-       tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
-               (unsigned long) hw_priv);
-       tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
-               (unsigned long) hw_priv);
-
-       /* tasklet_enable will decrement the atomic counter. */
-       tasklet_disable(&hw_priv->rx_tasklet);
-       tasklet_disable(&hw_priv->tx_tasklet);
-
        for (i = 0; i < TOTAL_PORT_NUM; i++)
                init_waitqueue_head(&hw_priv->counter[i].counter);
 
index 1c81825..b01f83a 100644 (file)
@@ -979,17 +979,6 @@ static void cp_init_hw (struct cp_private *cp)
        cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
        cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
 
-       cpw32_f(HiTxRingAddr, 0);
-       cpw32_f(HiTxRingAddr + 4, 0);
-
-       ring_dma = cp->ring_dma;
-       cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-       cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-       ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-       cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-       cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
        cp_start_hw(cp);
        cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
 
@@ -1003,6 +992,17 @@ static void cp_init_hw (struct cp_private *cp)
 
        cpw8(Config5, cpr8(Config5) & PMEStatus);
 
+       cpw32_f(HiTxRingAddr, 0);
+       cpw32_f(HiTxRingAddr + 4, 0);
+
+       ring_dma = cp->ring_dma;
+       cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+       cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+       ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+       cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+       cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
        cpw16(MultiIntr, 0);
 
        cpw8_f(Cfg9346, Cfg9346_Lock);
index e7ff886..927aa33 100644 (file)
@@ -3827,6 +3827,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
        void __iomem *ioaddr = tp->mmio_addr;
 
        switch (tp->mac_version) {
+       case RTL_GIGA_MAC_VER_25:
+       case RTL_GIGA_MAC_VER_26:
        case RTL_GIGA_MAC_VER_29:
        case RTL_GIGA_MAC_VER_30:
        case RTL_GIGA_MAC_VER_32:
@@ -4519,6 +4521,9 @@ static void rtl_set_rx_mode(struct net_device *dev)
                mc_filter[1] = swab32(data);
        }
 
+       if (tp->mac_version == RTL_GIGA_MAC_VER_35)
+               mc_filter[1] = mc_filter[0] = 0xffffffff;
+
        RTL_W32(MAR0 + 4, mc_filter[1]);
        RTL_W32(MAR0 + 0, mc_filter[0]);
 
index fb9f6b3..edf5edb 100644 (file)
@@ -2479,7 +2479,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
        netif_start_queue(net_dev);
 
        /* Workaround for EDB */
-       sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+       sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
 
        /* Enable all known interrupts by setting the interrupt mask. */
        sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
index 62d1baf..c53c0f4 100644 (file)
@@ -2110,7 +2110,7 @@ static void __devinit smsc911x_read_mac_address(struct net_device *dev)
 static int __devinit smsc911x_init(struct net_device *dev)
 {
        struct smsc911x_data *pdata = netdev_priv(dev);
-       unsigned int byte_test;
+       unsigned int byte_test, mask;
        unsigned int to = 100;
 
        SMSC_TRACE(pdata, probe, "Driver Parameters:");
@@ -2130,9 +2130,22 @@ static int __devinit smsc911x_init(struct net_device *dev)
        /*
         * poll the READY bit in PMT_CTRL. Any other access to the device is
         * forbidden while this bit isn't set. Try for 100ms
+        *
+        * Note that this test is done before the WORD_SWAP register is
+        * programmed. So in some configurations the READY bit is at 16 before
+        * WORD_SWAP is written to. This issue is worked around by waiting
+        * until either bit 0 or bit 16 gets set in PMT_CTRL.
+        *
+        * SMSC has confirmed that checking bit 16 (marked as reserved in
+        * the datasheet) is fine since these bits "will either never be set
+        * or can only go high after READY does (so also indicate the device
+        * is ready)".
         */
-       while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+
+       mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_);
+       while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to)
                udelay(1000);
+
        if (to == 0) {
                pr_err("Device not READY in 100ms aborting\n");
                return -ENODEV;
index 4e98100..66e025a 100644 (file)
@@ -917,7 +917,7 @@ static int tile_net_setup_interrupts(struct net_device *dev)
        ingress_irq = rc;
        tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
        rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
-                        0, NULL, NULL);
+                        0, "tile_net", NULL);
        if (rc != 0) {
                netdev_err(dev, "request_irq failed: %d\n", rc);
                destroy_irq(ingress_irq);
index 0793299..a788501 100644 (file)
@@ -894,6 +894,8 @@ out:
        return IRQ_HANDLED;
 }
 
+static void axienet_dma_err_handler(unsigned long data);
+
 /**
  * axienet_open - Driver open routine.
  * @ndev:      Pointer to net_device structure
@@ -942,6 +944,10 @@ static int axienet_open(struct net_device *ndev)
                phy_start(lp->phy_dev);
        }
 
+       /* Enable tasklets for Axi DMA error handling */
+       tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
+                    (unsigned long) lp);
+
        /* Enable interrupts for Axi DMA Tx */
        ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
        if (ret)
@@ -950,8 +956,7 @@ static int axienet_open(struct net_device *ndev)
        ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
        if (ret)
                goto err_rx_irq;
-       /* Enable tasklets for Axi DMA error handling */
-       tasklet_enable(&lp->dma_err_tasklet);
+
        return 0;
 
 err_rx_irq:
@@ -960,6 +965,7 @@ err_tx_irq:
        if (lp->phy_dev)
                phy_disconnect(lp->phy_dev);
        lp->phy_dev = NULL;
+       tasklet_kill(&lp->dma_err_tasklet);
        dev_err(lp->dev, "request_irq() failed\n");
        return ret;
 }
@@ -990,7 +996,7 @@ static int axienet_stop(struct net_device *ndev)
        axienet_setoptions(ndev, lp->options &
                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 
-       tasklet_disable(&lp->dma_err_tasklet);
+       tasklet_kill(&lp->dma_err_tasklet);
 
        free_irq(lp->tx_irq, ndev);
        free_irq(lp->rx_irq, ndev);
@@ -1613,10 +1619,6 @@ static int __devinit axienet_of_probe(struct platform_device *op)
                goto err_iounmap_2;
        }
 
-       tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
-                    (unsigned long) lp);
-       tasklet_disable(&lp->dma_err_tasklet);
-
        return 0;
 
 err_iounmap_2:
index 98934bd..477d672 100644 (file)
@@ -1102,10 +1102,12 @@ static int init_queues(struct port *port)
 {
        int i;
 
-       if (!ports_open)
-               if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
-                                                POOL_ALLOC_SIZE, 32, 0)))
+       if (!ports_open) {
+               dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+                                          POOL_ALLOC_SIZE, 32, 0);
+               if (!dma_pool)
                        return -ENOMEM;
+       }
 
        if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
                                              &port->desc_tab_phys)))
index 5039f08..43e9ab4 100644 (file)
@@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work)
                        break;
 
                case SIRDEV_STATE_DONGLE_SPEED:
-                       if (dev->dongle_drv->reset) {
+                       if (dev->dongle_drv->set_speed) {
                                ret = dev->dongle_drv->set_speed(dev, fsm->param);
                                if (ret < 0) {
                                        fsm->result = ret;
index 6428fcb..daec9b0 100644 (file)
@@ -234,7 +234,6 @@ void free_mdio_bitbang(struct mii_bus *bus)
        struct mdiobb_ctrl *ctrl = bus->priv;
 
        module_put(ctrl->ops->owner);
-       mdiobus_unregister(bus);
        mdiobus_free(bus);
 }
 EXPORT_SYMBOL(free_mdio_bitbang);
index 899274f..2ed1140 100644 (file)
@@ -185,17 +185,20 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
 {
        struct mdio_gpio_platform_data *pdata;
        struct mii_bus *new_bus;
-       int ret;
+       int ret, bus_id;
 
-       if (pdev->dev.of_node)
+       if (pdev->dev.of_node) {
                pdata = mdio_gpio_of_get_data(pdev);
-       else
+               bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+       } else {
                pdata = pdev->dev.platform_data;
+               bus_id = pdev->id;
+       }
 
        if (!pdata)
                return -ENODEV;
 
-       new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id);
+       new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id);
        if (!new_bus)
                return -ENODEV;
 
index 9db0171..c5db428 100644 (file)
@@ -29,8 +29,8 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
                        if (last) {
                                skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2) {
-                                       ret = team_dev_queue_xmit(team, last,
-                                                                 skb2);
+                                       ret = !team_dev_queue_xmit(team, last,
+                                                                  skb2);
                                        if (!sum_ret)
                                                sum_ret = ret;
                                }
@@ -39,7 +39,7 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
                }
        }
        if (last) {
-               ret = team_dev_queue_xmit(team, last, skb);
+               ret = !team_dev_queue_xmit(team, last, skb);
                if (!sum_ret)
                        sum_ret = ret;
        }
index c81e278..08d55b6 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/usb/cdc.h>
 #include <linux/usb/usbnet.h>
 #include <linux/gfp.h>
+#include <linux/if_vlan.h>
 
 
 /*
@@ -92,7 +93,7 @@ static int eem_bind(struct usbnet *dev, struct usb_interface *intf)
 
        /* no jumbogram (16K) support for now */
 
-       dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN;
+       dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN + VLAN_HLEN;
        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
 
        return 0;
index 4cd582a..74fab1a 100644 (file)
@@ -540,10 +540,12 @@ advance:
            (ctx->ether_desc == NULL) || (ctx->control != intf))
                goto error;
 
-       /* claim interfaces, if any */
-       temp = usb_driver_claim_interface(driver, ctx->data, dev);
-       if (temp)
-               goto error;
+       /* claim data interface, if different from control */
+       if (ctx->data != ctx->control) {
+               temp = usb_driver_claim_interface(driver, ctx->data, dev);
+               if (temp)
+                       goto error;
+       }
 
        iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
 
@@ -623,6 +625,10 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
 
        tasklet_kill(&ctx->bh);
 
+       /* handle devices with combined control and data interface */
+       if (ctx->control == ctx->data)
+               ctx->data = NULL;
+
        /* disconnect master --> disconnect slave */
        if (intf == ctx->control && ctx->data) {
                usb_set_intfdata(ctx->data, NULL);
@@ -1245,6 +1251,14 @@ static const struct usb_device_id cdc_devs[] = {
          .driver_info = (unsigned long) &wwan_info,
        },
 
+       /* Huawei NCM devices disguised as vendor specific */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
+         .driver_info = (unsigned long)&wwan_info,
+       },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
+         .driver_info = (unsigned long)&wwan_info,
+       },
+
        /* Generic CDC-NCM devices */
        { USB_INTERFACE_INFO(USB_CLASS_COMM,
                USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
index 7479a57..362cb8c 100644 (file)
@@ -184,7 +184,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
        /* set the address, index & direction (read from PHY) */
        phy_id &= dev->mii.phy_id_mask;
        idx &= dev->mii.reg_num_mask;
-       addr = (phy_id << 11) | (idx << 6) | MII_READ_;
+       addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
        ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
        check_warn_goto_done(ret, "Error writing MII_ADDR");
 
@@ -221,7 +221,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
        /* set the address, index & direction (write to PHY) */
        phy_id &= dev->mii.phy_id_mask;
        idx &= dev->mii.reg_num_mask;
-       addr = (phy_id << 11) | (idx << 6) | MII_WRITE_;
+       addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
        ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
        check_warn_goto_done(ret, "Error writing MII_ADDR");
 
@@ -1344,6 +1344,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
                } else {
                        u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
                        skb_push(skb, 4);
+                       cpu_to_le32s(&csum_preamble);
                        memcpy(skb->data, &csum_preamble, 4);
                }
        }
index cb04f90..edb81ed 100644 (file)
@@ -359,10 +359,12 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
 void usbnet_defer_kevent (struct usbnet *dev, int work)
 {
        set_bit (work, &dev->flags);
-       if (!schedule_work (&dev->kevent))
-               netdev_err(dev->net, "kevent %d may have been dropped\n", work);
-       else
+       if (!schedule_work (&dev->kevent)) {
+               if (net_ratelimit())
+                       netdev_err(dev->net, "kevent %d may have been dropped\n", work);
+       } else {
                netdev_dbg(dev->net, "kevent %d scheduled\n", work);
+       }
 }
 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
 
index 7b4adde..8b5c619 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * VXLAN: Virtual eXtensiable Local Area Network
+ * VXLAN: Virtual eXtensible Local Area Network
  *
  * Copyright (c) 2012 Vyatta Inc.
  *
@@ -50,8 +50,8 @@
 
 #define VXLAN_N_VID    (1u << 24)
 #define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-/* VLAN + IP header + UDP + VXLAN */
-#define VXLAN_HEADROOM (4 + 20 + 8 + 8)
+/* IP header + UDP + VXLAN + Ethernet header */
+#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
 
 #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
 
@@ -1102,6 +1102,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
 
                if (!tb[IFLA_MTU])
                        dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
+
+               /* update header length based on lower device */
+               dev->hard_header_len = lowerdev->hard_header_len +
+                                      VXLAN_HEADROOM;
        }
 
        if (data[IFLA_VXLAN_TOS])
index 3f575af..e9a3da5 100644 (file)
@@ -969,10 +969,12 @@ static int init_hdlc_queues(struct port *port)
 {
        int i;
 
-       if (!ports_open)
-               if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
-                                                POOL_ALLOC_SIZE, 32, 0)))
+       if (!ports_open) {
+               dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+                                          POOL_ALLOC_SIZE, 32, 0);
+               if (!dma_pool)
                        return -ENOMEM;
+       }
 
        if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
                                              &port->desc_tab_phys)))
index 8e1559a..1829b44 100644 (file)
@@ -1456,7 +1456,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
        switch (type) {
        case ATH9K_RESET_POWER_ON:
                ret = ath9k_hw_set_reset_power_on(ah);
-               if (!ret)
+               if (ret)
                        ah->reset_power_on = true;
                break;
        case ATH9K_RESET_WARM:
index 192251a..282eede 100644 (file)
@@ -382,7 +382,7 @@ static void cancel_transfers(struct b43legacy_pioqueue *queue)
 {
        struct b43legacy_pio_txpacket *packet, *tmp_packet;
 
-       tasklet_disable(&queue->txtask);
+       tasklet_kill(&queue->txtask);
 
        list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
                free_txpacket(packet, 0);
index a6f1e81..481345c 100644 (file)
@@ -4401,7 +4401,7 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
 
 static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
 {
-#ifndef CONFIG_BRCMFISCAN
+#ifndef CONFIG_BRCMISCAN
        /* scheduled scan settings */
        wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
        wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
index ff8162d..2d9eee9 100644 (file)
@@ -521,7 +521,7 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw,
                     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
        if (iwlagn_tx_skb(priv, control->sta, skb))
-               dev_kfree_skb_any(skb);
+               ieee80211_free_txskb(hw, skb);
 }
 
 static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -1354,6 +1354,20 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
        vif_priv->ctx = ctx;
        ctx->vif = vif;
 
+       /*
+        * In SNIFFER device type, the firmware reports the FCS to
+        * the host, rather than snipping it off. Unfortunately,
+        * mac80211 doesn't (yet) provide a per-packet flag for
+        * this, so that we have to set the hardware flag based
+        * on the interfaces added. As the monitor interface can
+        * only be present by itself, and will be removed before
+        * other interfaces are added, this is safe.
+        */
+       if (vif->type == NL80211_IFTYPE_MONITOR)
+               priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+       else
+               priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+
        err = iwl_setup_interface(priv, ctx);
        if (!err || reset)
                goto out;
index 7ff3f14..408132c 100644 (file)
@@ -2114,7 +2114,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
 
        info = IEEE80211_SKB_CB(skb);
        iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
-       dev_kfree_skb_any(skb);
+       ieee80211_free_txskb(priv->hw, skb);
 }
 
 static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
index 17c8e5d..bb69f8f 100644 (file)
@@ -321,6 +321,14 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
                        dma_map_page(trans->dev, page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
+               if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+                       rxb->page = NULL;
+                       spin_lock_irqsave(&rxq->lock, flags);
+                       list_add(&rxb->list, &rxq->rx_used);
+                       spin_unlock_irqrestore(&rxq->lock, flags);
+                       __free_pages(page, trans_pcie->rx_page_order);
+                       return;
+               }
                /* dma address must be no more than 36 bits */
                BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
                /* and also 256 byte aligned! */
@@ -488,8 +496,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
                        dma_map_page(trans->dev, rxb->page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
-               list_add_tail(&rxb->list, &rxq->rx_free);
-               rxq->free_count++;
+               if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+                       /*
+                        * free the page(s) as well to not break
+                        * the invariant that the items on the used
+                        * list have no page(s)
+                        */
+                       __free_pages(rxb->page, trans_pcie->rx_page_order);
+                       rxb->page = NULL;
+                       list_add_tail(&rxb->list, &rxq->rx_used);
+               } else {
+                       list_add_tail(&rxb->list, &rxq->rx_free);
+                       rxq->free_count++;
+               }
        } else
                list_add_tail(&rxb->list, &rxq->rx_used);
        spin_unlock_irqrestore(&rxq->lock, flags);
index 105e3af..79a4ddc 100644 (file)
@@ -480,20 +480,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       u16 rd_ptr, wr_ptr;
-       int n_bd = trans_pcie->txq[txq_id].q.n_bd;
 
        if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
                WARN_ONCE(1, "queue %d not used", txq_id);
                return;
        }
 
-       rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
-       wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
-
-       WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
-                 txq_id, rd_ptr, wr_ptr);
-
        iwl_txq_set_inactive(trans, txq_id);
        IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }
index 8d46510..ae9010e 100644 (file)
@@ -890,9 +890,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
                return;
        }
        cmd_node = adapter->curr_cmd;
-       if (cmd_node->wait_q_enabled)
-               adapter->cmd_wait_q.status = -ETIMEDOUT;
-
        if (cmd_node) {
                adapter->dbg.timeout_cmd_id =
                        adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
@@ -938,6 +935,14 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
 
                dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
                        adapter->ps_mode, adapter->ps_state);
+
+               if (cmd_node->wait_q_enabled) {
+                       adapter->cmd_wait_q.status = -ETIMEDOUT;
+                       wake_up_interruptible(&adapter->cmd_wait_q.wait);
+                       mwifiex_cancel_pending_ioctl(adapter);
+                       /* reset cmd_sent flag to unblock new commands */
+                       adapter->cmd_sent = false;
+               }
        }
        if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
                mwifiex_init_fw_complete(adapter);
index fc8a9bf..82cf0fa 100644 (file)
@@ -161,7 +161,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
        struct sdio_mmc_card *card;
        struct mwifiex_adapter *adapter;
        mmc_pm_flag_t pm_flag = 0;
-       int hs_actived = 0;
        int i;
        int ret = 0;
 
@@ -188,12 +187,14 @@ static int mwifiex_sdio_suspend(struct device *dev)
        adapter = card->adapter;
 
        /* Enable the Host Sleep */
-       hs_actived = mwifiex_enable_hs(adapter);
-       if (hs_actived) {
-               pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n");
-               ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+       if (!mwifiex_enable_hs(adapter)) {
+               dev_err(adapter->dev, "cmd: failed to suspend\n");
+               return -EFAULT;
        }
 
+       dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
+       ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+
        /* Indicate device suspended */
        adapter->is_suspended = true;
 
index 9970c2b..b7e6607 100644 (file)
@@ -297,6 +297,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        /*=== Customer ID ===*/
        /****** 8188CU ********/
        {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
+       {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/
        {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
index caa0110..fc24eb9 100644 (file)
@@ -452,29 +452,85 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
        /* Grant backend access to each skb fragment page. */
        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+               struct page *page = skb_frag_page(frag);
 
-               tx->flags |= XEN_NETTXF_more_data;
+               len = skb_frag_size(frag);
+               offset = frag->page_offset;
 
-               id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
-               np->tx_skbs[id].skb = skb_get(skb);
-               tx = RING_GET_REQUEST(&np->tx, prod++);
-               tx->id = id;
-               ref = gnttab_claim_grant_reference(&np->gref_tx_head);
-               BUG_ON((signed short)ref < 0);
+               /* Data must not cross a page boundary. */
+               BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
 
-               mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
-               gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
-                                               mfn, GNTMAP_readonly);
+               /* Skip unused frames from start of page */
+               page += offset >> PAGE_SHIFT;
+               offset &= ~PAGE_MASK;
 
-               tx->gref = np->grant_tx_ref[id] = ref;
-               tx->offset = frag->page_offset;
-               tx->size = skb_frag_size(frag);
-               tx->flags = 0;
+               while (len > 0) {
+                       unsigned long bytes;
+
+                       BUG_ON(offset >= PAGE_SIZE);
+
+                       bytes = PAGE_SIZE - offset;
+                       if (bytes > len)
+                               bytes = len;
+
+                       tx->flags |= XEN_NETTXF_more_data;
+
+                       id = get_id_from_freelist(&np->tx_skb_freelist,
+                                                 np->tx_skbs);
+                       np->tx_skbs[id].skb = skb_get(skb);
+                       tx = RING_GET_REQUEST(&np->tx, prod++);
+                       tx->id = id;
+                       ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+                       BUG_ON((signed short)ref < 0);
+
+                       mfn = pfn_to_mfn(page_to_pfn(page));
+                       gnttab_grant_foreign_access_ref(ref,
+                                                       np->xbdev->otherend_id,
+                                                       mfn, GNTMAP_readonly);
+
+                       tx->gref = np->grant_tx_ref[id] = ref;
+                       tx->offset = offset;
+                       tx->size = bytes;
+                       tx->flags = 0;
+
+                       offset += bytes;
+                       len -= bytes;
+
+                       /* Next frame */
+                       if (offset == PAGE_SIZE && len) {
+                               BUG_ON(!PageCompound(page));
+                               page++;
+                               offset = 0;
+                       }
+               }
        }
 
        np->tx.req_prod_pvt = prod;
 }
 
+/*
+ * Count how many ring slots are required to send the frags of this
+ * skb. Each frag might be a compound page.
+ */
+static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+{
+       int i, frags = skb_shinfo(skb)->nr_frags;
+       int pages = 0;
+
+       for (i = 0; i < frags; i++) {
+               skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+               unsigned long size = skb_frag_size(frag);
+               unsigned long offset = frag->page_offset;
+
+               /* Skip unused frames from start of page */
+               offset &= ~PAGE_MASK;
+
+               pages += PFN_UP(offset + size);
+       }
+
+       return pages;
+}
+
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        unsigned short id;
@@ -487,23 +543,23 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        grant_ref_t ref;
        unsigned long mfn;
        int notify;
-       int frags = skb_shinfo(skb)->nr_frags;
+       int slots;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);
        unsigned long flags;
 
-       frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
-       if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
-               printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
-                      frags);
-               dump_stack();
+       slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
+               xennet_count_skb_frag_slots(skb);
+       if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
+               net_alert_ratelimited(
+                       "xennet: skb rides the rocket: %d slots\n", slots);
                goto drop;
        }
 
        spin_lock_irqsave(&np->tx_lock, flags);
 
        if (unlikely(!netif_carrier_ok(dev) ||
-                    (frags > 1 && !xennet_can_sg(dev)) ||
+                    (slots > 1 && !xennet_can_sg(dev)) ||
                     netif_needs_gso(skb, netif_skb_features(skb)))) {
                spin_unlock_irqrestore(&np->tx_lock, flags);
                goto drop;
index 97c440a..30ae18a 100644 (file)
@@ -698,13 +698,14 @@ static void pn533_wq_cmd(struct work_struct *work)
 
        cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
 
+       list_del(&cmd->queue);
+
        mutex_unlock(&dev->cmd_lock);
 
        __pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame,
                                     cmd->in_frame_len, cmd->cmd_complete,
                                     cmd->arg, cmd->flags);
 
-       list_del(&cmd->queue);
        kfree(cmd);
 }
 
@@ -1678,11 +1679,14 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
 static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
                                                u8 *params, int params_len)
 {
-       struct pn533_cmd_jump_dep *cmd;
        struct pn533_cmd_jump_dep_response *resp;
        struct nfc_target nfc_target;
        u8 target_gt_len;
        int rc;
+       struct pn533_cmd_jump_dep *cmd = (struct pn533_cmd_jump_dep *)arg;
+       u8 active = cmd->active;
+
+       kfree(arg);
 
        if (params_len == -ENOENT) {
                nfc_dev_dbg(&dev->interface->dev, "");
@@ -1704,7 +1708,6 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
        }
 
        resp = (struct pn533_cmd_jump_dep_response *) params;
-       cmd = (struct pn533_cmd_jump_dep *) arg;
        rc = resp->status & PN533_CMD_RET_MASK;
        if (rc != PN533_CMD_RET_SUCCESS) {
                nfc_dev_err(&dev->interface->dev,
@@ -1734,7 +1737,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
        if (rc == 0)
                rc = nfc_dep_link_is_up(dev->nfc_dev,
                                                dev->nfc_dev->targets[0].idx,
-                                               !cmd->active, NFC_RF_INITIATOR);
+                                               !active, NFC_RF_INITIATOR);
 
        return 0;
 }
@@ -1819,12 +1822,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
        rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
                                dev->in_maxlen, pn533_in_dep_link_up_complete,
                                cmd, GFP_KERNEL);
-       if (rc)
-               goto out;
-
-
-out:
-       kfree(cmd);
+       if (rc < 0)
+               kfree(cmd);
 
        return rc;
 }
@@ -2078,8 +2077,12 @@ error:
 static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
                                  u8 *params, int params_len)
 {
+       struct sk_buff *skb_out = arg;
+
        nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
 
+       dev_kfree_skb(skb_out);
+
        if (params_len < 0) {
                nfc_dev_err(&dev->interface->dev,
                            "Error %d when sending data",
@@ -2117,7 +2120,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
 
        rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame,
                                        dev->in_maxlen, pn533_tm_send_complete,
-                                       NULL, GFP_KERNEL);
+                                       skb, GFP_KERNEL);
        if (rc) {
                nfc_dev_err(&dev->interface->dev,
                            "Error %d when trying to send data", rc);
index 6241fd0..a543746 100644 (file)
@@ -320,10 +320,7 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
                } else
                        next = dev->bus_list.next;
 
-               /* Run device routines with the device locked */
-               device_lock(&dev->dev);
                retval = cb(dev, userdata);
-               device_unlock(&dev->dev);
                if (retval)
                        break;
        }
index 94c6e2a..6c94fc9 100644 (file)
@@ -398,6 +398,8 @@ static void pci_device_shutdown(struct device *dev)
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct pci_driver *drv = pci_dev->driver;
 
+       pm_runtime_resume(dev);
+
        if (drv && drv->shutdown)
                drv->shutdown(pci_dev);
        pci_msi_shutdown(pci_dev);
@@ -408,16 +410,6 @@ static void pci_device_shutdown(struct device *dev)
         * continue to do DMA
         */
        pci_disable_device(pci_dev);
-
-       /*
-        * Devices may be enabled to wake up by runtime PM, but they need not
-        * be supposed to wake up the system from its "power off" state (e.g.
-        * ACPI S5).  Therefore disable wakeup for all devices that aren't
-        * supposed to wake up the system at this point.  The state argument
-        * will be ignored by pci_enable_wake().
-        */
-       if (!device_may_wakeup(dev))
-               pci_enable_wake(pci_dev, PCI_UNKNOWN, false);
 }
 
 #ifdef CONFIG_PM
index 02d107b..f39378d 100644 (file)
@@ -458,40 +458,6 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
 }
 struct device_attribute vga_attr = __ATTR_RO(boot_vga);
 
-static void
-pci_config_pm_runtime_get(struct pci_dev *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct device *parent = dev->parent;
-
-       if (parent)
-               pm_runtime_get_sync(parent);
-       pm_runtime_get_noresume(dev);
-       /*
-        * pdev->current_state is set to PCI_D3cold during suspending,
-        * so wait until suspending completes
-        */
-       pm_runtime_barrier(dev);
-       /*
-        * Only need to resume devices in D3cold, because config
-        * registers are still accessible for devices suspended but
-        * not in D3cold.
-        */
-       if (pdev->current_state == PCI_D3cold)
-               pm_runtime_resume(dev);
-}
-
-static void
-pci_config_pm_runtime_put(struct pci_dev *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct device *parent = dev->parent;
-
-       pm_runtime_put(dev);
-       if (parent)
-               pm_runtime_put_sync(parent);
-}
-
 static ssize_t
 pci_read_config(struct file *filp, struct kobject *kobj,
                struct bin_attribute *bin_attr,
index 5485883..aabf647 100644 (file)
@@ -1858,6 +1858,38 @@ bool pci_dev_run_wake(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
 
+void pci_config_pm_runtime_get(struct pci_dev *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device *parent = dev->parent;
+
+       if (parent)
+               pm_runtime_get_sync(parent);
+       pm_runtime_get_noresume(dev);
+       /*
+        * pdev->current_state is set to PCI_D3cold during suspending,
+        * so wait until suspending completes
+        */
+       pm_runtime_barrier(dev);
+       /*
+        * Only need to resume devices in D3cold, because config
+        * registers are still accessible for devices suspended but
+        * not in D3cold.
+        */
+       if (pdev->current_state == PCI_D3cold)
+               pm_runtime_resume(dev);
+}
+
+void pci_config_pm_runtime_put(struct pci_dev *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device *parent = dev->parent;
+
+       pm_runtime_put(dev);
+       if (parent)
+               pm_runtime_put_sync(parent);
+}
+
 /**
  * pci_pm_init - Initialize PM functions of given PCI device
  * @dev: PCI device to handle.
index bacbcba..fd92aab 100644 (file)
@@ -72,6 +72,8 @@ extern void pci_disable_enabled_device(struct pci_dev *dev);
 extern int pci_finish_runtime_suspend(struct pci_dev *dev);
 extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
 extern void pci_wakeup_bus(struct pci_bus *bus);
+extern void pci_config_pm_runtime_get(struct pci_dev *dev);
+extern void pci_config_pm_runtime_put(struct pci_dev *dev);
 extern void pci_pm_init(struct pci_dev *dev);
 extern void platform_pci_wakeup_init(struct pci_dev *dev);
 extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
index 06bad96..af4e31c 100644 (file)
@@ -213,6 +213,7 @@ static int report_error_detected(struct pci_dev *dev, void *data)
        struct aer_broadcast_data *result_data;
        result_data = (struct aer_broadcast_data *) data;
 
+       device_lock(&dev->dev);
        dev->error_state = result_data->state;
 
        if (!dev->driver ||
@@ -231,12 +232,14 @@ static int report_error_detected(struct pci_dev *dev, void *data)
                                   dev->driver ?
                                   "no AER-aware driver" : "no driver");
                }
-               return 0;
+               goto out;
        }
 
        err_handler = dev->driver->err_handler;
        vote = err_handler->error_detected(dev, result_data->state);
        result_data->result = merge_result(result_data->result, vote);
+out:
+       device_unlock(&dev->dev);
        return 0;
 }
 
@@ -247,14 +250,17 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data)
        struct aer_broadcast_data *result_data;
        result_data = (struct aer_broadcast_data *) data;
 
+       device_lock(&dev->dev);
        if (!dev->driver ||
                !dev->driver->err_handler ||
                !dev->driver->err_handler->mmio_enabled)
-               return 0;
+               goto out;
 
        err_handler = dev->driver->err_handler;
        vote = err_handler->mmio_enabled(dev);
        result_data->result = merge_result(result_data->result, vote);
+out:
+       device_unlock(&dev->dev);
        return 0;
 }
 
@@ -265,14 +271,17 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
        struct aer_broadcast_data *result_data;
        result_data = (struct aer_broadcast_data *) data;
 
+       device_lock(&dev->dev);
        if (!dev->driver ||
                !dev->driver->err_handler ||
                !dev->driver->err_handler->slot_reset)
-               return 0;
+               goto out;
 
        err_handler = dev->driver->err_handler;
        vote = err_handler->slot_reset(dev);
        result_data->result = merge_result(result_data->result, vote);
+out:
+       device_unlock(&dev->dev);
        return 0;
 }
 
@@ -280,15 +289,18 @@ static int report_resume(struct pci_dev *dev, void *data)
 {
        const struct pci_error_handlers *err_handler;
 
+       device_lock(&dev->dev);
        dev->error_state = pci_channel_io_normal;
 
        if (!dev->driver ||
                !dev->driver->err_handler ||
                !dev->driver->err_handler->resume)
-               return 0;
+               goto out;
 
        err_handler = dev->driver->err_handler;
        err_handler->resume(dev);
+out:
+       device_unlock(&dev->dev);
        return 0;
 }
 
index d03a7a3..ed129b4 100644 (file)
@@ -272,7 +272,8 @@ static int get_port_device_capability(struct pci_dev *dev)
        }
 
        /* Hot-Plug Capable */
-       if (cap_mask & PCIE_PORT_SERVICE_HP) {
+       if ((cap_mask & PCIE_PORT_SERVICE_HP) &&
+           dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT) {
                pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, &reg32);
                if (reg32 & PCI_EXP_SLTCAP_HPC) {
                        services |= PCIE_PORT_SERVICE_HP;
index eb907a8..9b8505c 100644 (file)
@@ -76,6 +76,8 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
        if (!access_ok(VERIFY_WRITE, buf, cnt))
                return -EINVAL;
 
+       pci_config_pm_runtime_get(dev);
+
        if ((pos & 1) && cnt) {
                unsigned char val;
                pci_user_read_config_byte(dev, pos, &val);
@@ -121,6 +123,8 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
                cnt--;
        }
 
+       pci_config_pm_runtime_put(dev);
+
        *ppos = pos;
        return nbytes;
 }
@@ -146,6 +150,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
        if (!access_ok(VERIFY_READ, buf, cnt))
                return -EINVAL;
 
+       pci_config_pm_runtime_get(dev);
+
        if ((pos & 1) && cnt) {
                unsigned char val;
                __get_user(val, buf);
@@ -191,6 +197,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
                cnt--;
        }
 
+       pci_config_pm_runtime_put(dev);
+
        *ppos = pos;
        i_size_write(ino, dp->size);
        return nbytes;
index 7bf914d..aeecf0f 100644 (file)
@@ -178,12 +178,14 @@ config PINCTRL_COH901
          ports of 8 GPIO pins each.
 
 config PINCTRL_SAMSUNG
-       bool "Samsung pinctrl driver"
+       bool
+       depends on OF && GPIOLIB
        select PINMUX
        select PINCONF
 
 config PINCTRL_EXYNOS4
        bool "Pinctrl driver data for Exynos4 SoC"
+       depends on OF && GPIOLIB
        select PINCTRL_SAMSUNG
 
 config PINCTRL_MVEBU
index 5d4f44f..b1fd6ee 100644 (file)
@@ -244,7 +244,7 @@ static int spear_pinctrl_endisable(struct pinctrl_dev *pctldev,
                        else
                                temp = ~muxreg->val;
 
-                       val |= temp;
+                       val |= muxreg->mask & temp;
                        pmx_writel(pmx, val, muxreg->reg);
                }
        }
index d6cca8c..0436fc7 100644 (file)
@@ -25,8 +25,8 @@ static const struct pinctrl_pin_desc spear1310_pins[] = {
 };
 
 /* registers */
-#define PERIP_CFG                                      0x32C
-       #define MCIF_SEL_SHIFT                          3
+#define PERIP_CFG                                      0x3B0
+       #define MCIF_SEL_SHIFT                          5
        #define MCIF_SEL_SD                             (0x1 << MCIF_SEL_SHIFT)
        #define MCIF_SEL_CF                             (0x2 << MCIF_SEL_SHIFT)
        #define MCIF_SEL_XD                             (0x3 << MCIF_SEL_SHIFT)
@@ -164,6 +164,10 @@ static const struct pinctrl_pin_desc spear1310_pins[] = {
        #define PMX_SSP0_CS0_MASK                       (1 << 29)
        #define PMX_SSP0_CS1_2_MASK                     (1 << 30)
 
+#define PAD_DIRECTION_SEL_0                            0x65C
+#define PAD_DIRECTION_SEL_1                            0x660
+#define PAD_DIRECTION_SEL_2                            0x664
+
 /* combined macros */
 #define PMX_GMII_MASK          (PMX_GMIICLK_MASK |                     \
                                PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK |  \
@@ -237,6 +241,10 @@ static struct spear_muxreg i2c0_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_I2C0_MASK,
                .val = PMX_I2C0_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_I2C0_MASK,
+               .val = PMX_I2C0_MASK,
        },
 };
 
@@ -269,6 +277,10 @@ static struct spear_muxreg ssp0_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_SSP0_MASK,
                .val = PMX_SSP0_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_SSP0_MASK,
+               .val = PMX_SSP0_MASK,
        },
 };
 
@@ -294,6 +306,10 @@ static struct spear_muxreg ssp0_cs0_muxreg[] = {
                .reg = PAD_FUNCTION_EN_2,
                .mask = PMX_SSP0_CS0_MASK,
                .val = PMX_SSP0_CS0_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_SSP0_CS0_MASK,
+               .val = PMX_SSP0_CS0_MASK,
        },
 };
 
@@ -319,6 +335,10 @@ static struct spear_muxreg ssp0_cs1_2_muxreg[] = {
                .reg = PAD_FUNCTION_EN_2,
                .mask = PMX_SSP0_CS1_2_MASK,
                .val = PMX_SSP0_CS1_2_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_SSP0_CS1_2_MASK,
+               .val = PMX_SSP0_CS1_2_MASK,
        },
 };
 
@@ -352,6 +372,10 @@ static struct spear_muxreg i2s0_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_I2S0_MASK,
                .val = PMX_I2S0_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_I2S0_MASK,
+               .val = PMX_I2S0_MASK,
        },
 };
 
@@ -384,6 +408,10 @@ static struct spear_muxreg i2s1_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_I2S1_MASK,
                .val = PMX_I2S1_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_I2S1_MASK,
+               .val = PMX_I2S1_MASK,
        },
 };
 
@@ -418,6 +446,10 @@ static struct spear_muxreg clcd_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_CLCD1_MASK,
                .val = PMX_CLCD1_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_CLCD1_MASK,
+               .val = PMX_CLCD1_MASK,
        },
 };
 
@@ -443,6 +475,10 @@ static struct spear_muxreg clcd_high_res_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_CLCD2_MASK,
                .val = PMX_CLCD2_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_CLCD2_MASK,
+               .val = PMX_CLCD2_MASK,
        },
 };
 
@@ -461,7 +497,7 @@ static struct spear_pingroup clcd_high_res_pingroup = {
        .nmodemuxs = ARRAY_SIZE(clcd_high_res_modemux),
 };
 
-static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res" };
+static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res_grp" };
 static struct spear_function clcd_function = {
        .name = "clcd",
        .groups = clcd_grps,
@@ -479,6 +515,14 @@ static struct spear_muxreg arm_gpio_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_EGPIO_1_GRP_MASK,
                .val = PMX_EGPIO_1_GRP_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_EGPIO_0_GRP_MASK,
+               .val = PMX_EGPIO_0_GRP_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_EGPIO_1_GRP_MASK,
+               .val = PMX_EGPIO_1_GRP_MASK,
        },
 };
 
@@ -511,6 +555,10 @@ static struct spear_muxreg smi_2_chips_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_SMI_MASK,
                .val = PMX_SMI_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_SMI_MASK,
+               .val = PMX_SMI_MASK,
        },
 };
 
@@ -539,6 +587,14 @@ static struct spear_muxreg smi_4_chips_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
                .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_SMI_MASK,
+               .val = PMX_SMI_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+               .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
        },
 };
 
@@ -573,6 +629,10 @@ static struct spear_muxreg gmii_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_GMII_MASK,
                .val = PMX_GMII_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_GMII_MASK,
+               .val = PMX_GMII_MASK,
        },
 };
 
@@ -615,6 +675,18 @@ static struct spear_muxreg rgmii_muxreg[] = {
                .reg = PAD_FUNCTION_EN_2,
                .mask = PMX_RGMII_REG2_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_RGMII_REG0_MASK,
+               .val = PMX_RGMII_REG0_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_RGMII_REG1_MASK,
+               .val = PMX_RGMII_REG1_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_RGMII_REG2_MASK,
+               .val = PMX_RGMII_REG2_MASK,
        },
 };
 
@@ -649,6 +721,10 @@ static struct spear_muxreg smii_0_1_2_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_SMII_0_1_2_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_SMII_0_1_2_MASK,
+               .val = PMX_SMII_0_1_2_MASK,
        },
 };
 
@@ -681,6 +757,10 @@ static struct spear_muxreg ras_mii_txclk_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_NFCE2_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_NFCE2_MASK,
+               .val = PMX_NFCE2_MASK,
        },
 };
 
@@ -721,6 +801,14 @@ static struct spear_muxreg nand_8bit_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_NAND8BIT_1_MASK,
                .val = PMX_NAND8BIT_1_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_NAND8BIT_0_MASK,
+               .val = PMX_NAND8BIT_0_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_NAND8BIT_1_MASK,
+               .val = PMX_NAND8BIT_1_MASK,
        },
 };
 
@@ -747,6 +835,10 @@ static struct spear_muxreg nand_16bit_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_NAND16BIT_1_MASK,
                .val = PMX_NAND16BIT_1_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_NAND16BIT_1_MASK,
+               .val = PMX_NAND16BIT_1_MASK,
        },
 };
 
@@ -772,6 +864,10 @@ static struct spear_muxreg nand_4_chips_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_NAND_4CHIPS_MASK,
                .val = PMX_NAND_4CHIPS_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_NAND_4CHIPS_MASK,
+               .val = PMX_NAND_4CHIPS_MASK,
        },
 };
 
@@ -833,6 +929,10 @@ static struct spear_muxreg keyboard_rowcol6_8_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_KBD_ROWCOL68_MASK,
                .val = PMX_KBD_ROWCOL68_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_KBD_ROWCOL68_MASK,
+               .val = PMX_KBD_ROWCOL68_MASK,
        },
 };
 
@@ -866,6 +966,10 @@ static struct spear_muxreg uart0_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_UART0_MASK,
                .val = PMX_UART0_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_UART0_MASK,
+               .val = PMX_UART0_MASK,
        },
 };
 
@@ -891,6 +995,10 @@ static struct spear_muxreg uart0_modem_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_UART0_MODEM_MASK,
                .val = PMX_UART0_MODEM_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_UART0_MODEM_MASK,
+               .val = PMX_UART0_MODEM_MASK,
        },
 };
 
@@ -923,6 +1031,10 @@ static struct spear_muxreg gpt0_tmr0_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_GPT0_TMR0_MASK,
                .val = PMX_GPT0_TMR0_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_GPT0_TMR0_MASK,
+               .val = PMX_GPT0_TMR0_MASK,
        },
 };
 
@@ -948,6 +1060,10 @@ static struct spear_muxreg gpt0_tmr1_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_GPT0_TMR1_MASK,
                .val = PMX_GPT0_TMR1_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_GPT0_TMR1_MASK,
+               .val = PMX_GPT0_TMR1_MASK,
        },
 };
 
@@ -980,6 +1096,10 @@ static struct spear_muxreg gpt1_tmr0_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_GPT1_TMR0_MASK,
                .val = PMX_GPT1_TMR0_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_GPT1_TMR0_MASK,
+               .val = PMX_GPT1_TMR0_MASK,
        },
 };
 
@@ -1005,6 +1125,10 @@ static struct spear_muxreg gpt1_tmr1_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_GPT1_TMR1_MASK,
                .val = PMX_GPT1_TMR1_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_GPT1_TMR1_MASK,
+               .val = PMX_GPT1_TMR1_MASK,
        },
 };
 
@@ -1049,6 +1173,20 @@ static const unsigned mcif_pins[] = { 86, 87, 88, 89, 90, 91, 92, 93, 213, 214,
                .reg = PAD_FUNCTION_EN_2,                       \
                .mask = PMX_MCIFALL_2_MASK,                     \
                .val = PMX_MCIFALL_2_MASK,                      \
+       }, {                                                    \
+               .reg = PAD_DIRECTION_SEL_0,                     \
+               .mask = PMX_MCI_DATA8_15_MASK,                  \
+               .val = PMX_MCI_DATA8_15_MASK,                   \
+       }, {                                                    \
+               .reg = PAD_DIRECTION_SEL_1,                     \
+               .mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \
+                       PMX_NFWPRT2_MASK,                       \
+               .val = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK |  \
+                       PMX_NFWPRT2_MASK,                       \
+       }, {                                                    \
+               .reg = PAD_DIRECTION_SEL_2,                     \
+               .mask = PMX_MCIFALL_2_MASK,                     \
+               .val = PMX_MCIFALL_2_MASK,                      \
        }
 
 /* sdhci device */
@@ -1154,6 +1292,10 @@ static struct spear_muxreg touch_xy_muxreg[] = {
                .reg = PAD_FUNCTION_EN_2,
                .mask = PMX_TOUCH_XY_MASK,
                .val = PMX_TOUCH_XY_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_TOUCH_XY_MASK,
+               .val = PMX_TOUCH_XY_MASK,
        },
 };
 
@@ -1187,6 +1329,10 @@ static struct spear_muxreg uart1_dis_i2c_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_I2C0_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_I2C0_MASK,
+               .val = PMX_I2C0_MASK,
        },
 };
 
@@ -1213,6 +1359,12 @@ static struct spear_muxreg uart1_dis_sd_muxreg[] = {
                .mask = PMX_MCIDATA1_MASK |
                        PMX_MCIDATA2_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_MCIDATA1_MASK |
+                       PMX_MCIDATA2_MASK,
+               .val = PMX_MCIDATA1_MASK |
+                       PMX_MCIDATA2_MASK,
        },
 };
 
@@ -1246,6 +1398,10 @@ static struct spear_muxreg uart2_3_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_I2S0_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_I2S0_MASK,
+               .val = PMX_I2S0_MASK,
        },
 };
 
@@ -1278,6 +1434,10 @@ static struct spear_muxreg uart4_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK,
+               .val = PMX_I2S0_MASK | PMX_CLCD1_MASK,
        },
 };
 
@@ -1310,6 +1470,10 @@ static struct spear_muxreg uart5_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_CLCD1_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_CLCD1_MASK,
+               .val = PMX_CLCD1_MASK,
        },
 };
 
@@ -1344,6 +1508,10 @@ static struct spear_muxreg rs485_0_1_tdm_0_1_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_CLCD1_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_CLCD1_MASK,
+               .val = PMX_CLCD1_MASK,
        },
 };
 
@@ -1376,6 +1544,10 @@ static struct spear_muxreg i2c_1_2_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_CLCD1_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_CLCD1_MASK,
+               .val = PMX_CLCD1_MASK,
        },
 };
 
@@ -1409,6 +1581,10 @@ static struct spear_muxreg i2c3_dis_smi_clcd_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_CLCD1_MASK | PMX_SMI_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_CLCD1_MASK | PMX_SMI_MASK,
+               .val = PMX_CLCD1_MASK | PMX_SMI_MASK,
        },
 };
 
@@ -1435,6 +1611,10 @@ static struct spear_muxreg i2c3_dis_sd_i2s0_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
+               .val = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
        },
 };
 
@@ -1469,6 +1649,10 @@ static struct spear_muxreg i2c_4_5_dis_smi_muxreg[] = {
                .reg = PAD_FUNCTION_EN_0,
                .mask = PMX_SMI_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_SMI_MASK,
+               .val = PMX_SMI_MASK,
        },
 };
 
@@ -1499,6 +1683,14 @@ static struct spear_muxreg i2c4_dis_sd_muxreg[] = {
                .reg = PAD_FUNCTION_EN_2,
                .mask = PMX_MCIDATA5_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_MCIDATA4_MASK,
+               .val = PMX_MCIDATA4_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_MCIDATA5_MASK,
+               .val = PMX_MCIDATA5_MASK,
        },
 };
 
@@ -1526,6 +1718,12 @@ static struct spear_muxreg i2c5_dis_sd_muxreg[] = {
                .mask = PMX_MCIDATA6_MASK |
                        PMX_MCIDATA7_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_MCIDATA6_MASK |
+                       PMX_MCIDATA7_MASK,
+               .val = PMX_MCIDATA6_MASK |
+                       PMX_MCIDATA7_MASK,
        },
 };
 
@@ -1560,6 +1758,10 @@ static struct spear_muxreg i2c_6_7_dis_kbd_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_KBD_ROWCOL25_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_KBD_ROWCOL25_MASK,
+               .val = PMX_KBD_ROWCOL25_MASK,
        },
 };
 
@@ -1587,6 +1789,12 @@ static struct spear_muxreg i2c6_dis_sd_muxreg[] = {
                .mask = PMX_MCIIORDRE_MASK |
                        PMX_MCIIOWRWE_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_MCIIORDRE_MASK |
+                       PMX_MCIIOWRWE_MASK,
+               .val = PMX_MCIIORDRE_MASK |
+                       PMX_MCIIOWRWE_MASK,
        },
 };
 
@@ -1613,6 +1821,12 @@ static struct spear_muxreg i2c7_dis_sd_muxreg[] = {
                .mask = PMX_MCIRESETCF_MASK |
                        PMX_MCICS0CE_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_MCIRESETCF_MASK |
+                       PMX_MCICS0CE_MASK,
+               .val = PMX_MCIRESETCF_MASK |
+                       PMX_MCICS0CE_MASK,
        },
 };
 
@@ -1651,6 +1865,14 @@ static struct spear_muxreg can0_dis_nor_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_NFRSTPWDWN3_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_NFRSTPWDWN2_MASK,
+               .val = PMX_NFRSTPWDWN2_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_NFRSTPWDWN3_MASK,
+               .val = PMX_NFRSTPWDWN3_MASK,
        },
 };
 
@@ -1677,6 +1899,10 @@ static struct spear_muxreg can0_dis_sd_muxreg[] = {
                .reg = PAD_FUNCTION_EN_2,
                .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
+               .val = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
        },
 };
 
@@ -1711,6 +1937,10 @@ static struct spear_muxreg can1_dis_sd_muxreg[] = {
                .reg = PAD_FUNCTION_EN_2,
                .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
+               .val = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
        },
 };
 
@@ -1737,6 +1967,10 @@ static struct spear_muxreg can1_dis_kbd_muxreg[] = {
                .reg = PAD_FUNCTION_EN_1,
                .mask = PMX_KBD_ROWCOL25_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_KBD_ROWCOL25_MASK,
+               .val = PMX_KBD_ROWCOL25_MASK,
        },
 };
 
@@ -1763,29 +1997,64 @@ static struct spear_function can1_function = {
        .ngroups = ARRAY_SIZE(can1_grps),
 };
 
-/* Pad multiplexing for pci device */
-static const unsigned pci_sata_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18,
+/* Pad multiplexing for (ras-ip) pci device */
+static const unsigned pci_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18,
        19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
        37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
        55, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 };
-#define PCI_SATA_MUXREG                                \
-       {                                       \
-               .reg = PAD_FUNCTION_EN_0,       \
-               .mask = PMX_MCI_DATA8_15_MASK,  \
-               .val = 0,                       \
-       }, {                                    \
-               .reg = PAD_FUNCTION_EN_1,       \
-               .mask = PMX_PCI_REG1_MASK,      \
-               .val = 0,                       \
-       }, {                                    \
-               .reg = PAD_FUNCTION_EN_2,       \
-               .mask = PMX_PCI_REG2_MASK,      \
-               .val = 0,                       \
-       }
 
-/* pad multiplexing for pcie0 device */
+static struct spear_muxreg pci_muxreg[] = {
+       {
+               .reg = PAD_FUNCTION_EN_0,
+               .mask = PMX_MCI_DATA8_15_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_1,
+               .mask = PMX_PCI_REG1_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_2,
+               .mask = PMX_PCI_REG2_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_0,
+               .mask = PMX_MCI_DATA8_15_MASK,
+               .val = PMX_MCI_DATA8_15_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_PCI_REG1_MASK,
+               .val = PMX_PCI_REG1_MASK,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_PCI_REG2_MASK,
+               .val = PMX_PCI_REG2_MASK,
+       },
+};
+
+static struct spear_modemux pci_modemux[] = {
+       {
+               .muxregs = pci_muxreg,
+               .nmuxregs = ARRAY_SIZE(pci_muxreg),
+       },
+};
+
+static struct spear_pingroup pci_pingroup = {
+       .name = "pci_grp",
+       .pins = pci_pins,
+       .npins = ARRAY_SIZE(pci_pins),
+       .modemuxs = pci_modemux,
+       .nmodemuxs = ARRAY_SIZE(pci_modemux),
+};
+
+static const char *const pci_grps[] = { "pci_grp" };
+static struct spear_function pci_function = {
+       .name = "pci",
+       .groups = pci_grps,
+       .ngroups = ARRAY_SIZE(pci_grps),
+};
+
+/* pad multiplexing for (fix-part) pcie0 device */
 static struct spear_muxreg pcie0_muxreg[] = {
-       PCI_SATA_MUXREG,
        {
                .reg = PCIE_SATA_CFG,
                .mask = PCIE_CFG_VAL(0),
@@ -1802,15 +2071,12 @@ static struct spear_modemux pcie0_modemux[] = {
 
 static struct spear_pingroup pcie0_pingroup = {
        .name = "pcie0_grp",
-       .pins = pci_sata_pins,
-       .npins = ARRAY_SIZE(pci_sata_pins),
        .modemuxs = pcie0_modemux,
        .nmodemuxs = ARRAY_SIZE(pcie0_modemux),
 };
 
-/* pad multiplexing for pcie1 device */
+/* pad multiplexing for (fix-part) pcie1 device */
 static struct spear_muxreg pcie1_muxreg[] = {
-       PCI_SATA_MUXREG,
        {
                .reg = PCIE_SATA_CFG,
                .mask = PCIE_CFG_VAL(1),
@@ -1827,15 +2093,12 @@ static struct spear_modemux pcie1_modemux[] = {
 
 static struct spear_pingroup pcie1_pingroup = {
        .name = "pcie1_grp",
-       .pins = pci_sata_pins,
-       .npins = ARRAY_SIZE(pci_sata_pins),
        .modemuxs = pcie1_modemux,
        .nmodemuxs = ARRAY_SIZE(pcie1_modemux),
 };
 
-/* pad multiplexing for pcie2 device */
+/* pad multiplexing for (fix-part) pcie2 device */
 static struct spear_muxreg pcie2_muxreg[] = {
-       PCI_SATA_MUXREG,
        {
                .reg = PCIE_SATA_CFG,
                .mask = PCIE_CFG_VAL(2),
@@ -1852,22 +2115,20 @@ static struct spear_modemux pcie2_modemux[] = {
 
 static struct spear_pingroup pcie2_pingroup = {
        .name = "pcie2_grp",
-       .pins = pci_sata_pins,
-       .npins = ARRAY_SIZE(pci_sata_pins),
        .modemuxs = pcie2_modemux,
        .nmodemuxs = ARRAY_SIZE(pcie2_modemux),
 };
 
-static const char *const pci_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" };
-static struct spear_function pci_function = {
-       .name = "pci",
-       .groups = pci_grps,
-       .ngroups = ARRAY_SIZE(pci_grps),
+static const char *const pcie_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp"
+};
+static struct spear_function pcie_function = {
+       .name = "pci_express",
+       .groups = pcie_grps,
+       .ngroups = ARRAY_SIZE(pcie_grps),
 };
 
 /* pad multiplexing for sata0 device */
 static struct spear_muxreg sata0_muxreg[] = {
-       PCI_SATA_MUXREG,
        {
                .reg = PCIE_SATA_CFG,
                .mask = SATA_CFG_VAL(0),
@@ -1884,15 +2145,12 @@ static struct spear_modemux sata0_modemux[] = {
 
 static struct spear_pingroup sata0_pingroup = {
        .name = "sata0_grp",
-       .pins = pci_sata_pins,
-       .npins = ARRAY_SIZE(pci_sata_pins),
        .modemuxs = sata0_modemux,
        .nmodemuxs = ARRAY_SIZE(sata0_modemux),
 };
 
 /* pad multiplexing for sata1 device */
 static struct spear_muxreg sata1_muxreg[] = {
-       PCI_SATA_MUXREG,
        {
                .reg = PCIE_SATA_CFG,
                .mask = SATA_CFG_VAL(1),
@@ -1909,15 +2167,12 @@ static struct spear_modemux sata1_modemux[] = {
 
 static struct spear_pingroup sata1_pingroup = {
        .name = "sata1_grp",
-       .pins = pci_sata_pins,
-       .npins = ARRAY_SIZE(pci_sata_pins),
        .modemuxs = sata1_modemux,
        .nmodemuxs = ARRAY_SIZE(sata1_modemux),
 };
 
 /* pad multiplexing for sata2 device */
 static struct spear_muxreg sata2_muxreg[] = {
-       PCI_SATA_MUXREG,
        {
                .reg = PCIE_SATA_CFG,
                .mask = SATA_CFG_VAL(2),
@@ -1934,8 +2189,6 @@ static struct spear_modemux sata2_modemux[] = {
 
 static struct spear_pingroup sata2_pingroup = {
        .name = "sata2_grp",
-       .pins = pci_sata_pins,
-       .npins = ARRAY_SIZE(pci_sata_pins),
        .modemuxs = sata2_modemux,
        .nmodemuxs = ARRAY_SIZE(sata2_modemux),
 };
@@ -1957,6 +2210,14 @@ static struct spear_muxreg ssp1_dis_kbd_muxreg[] = {
                        PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
                        PMX_NFCE2_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_1,
+               .mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK |
+                       PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
+                       PMX_NFCE2_MASK,
+               .val = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK |
+                       PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
+                       PMX_NFCE2_MASK,
        },
 };
 
@@ -1983,6 +2244,12 @@ static struct spear_muxreg ssp1_dis_sd_muxreg[] = {
                .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
                        PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
+                       PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
+               .val = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
+                       PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
        },
 };
 
@@ -2017,6 +2284,12 @@ static struct spear_muxreg gpt64_muxreg[] = {
                .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
                        | PMX_MCILEDS_MASK,
                .val = 0,
+       }, {
+               .reg = PAD_DIRECTION_SEL_2,
+               .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
+                       | PMX_MCILEDS_MASK,
+               .val = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
+                       | PMX_MCILEDS_MASK,
        },
 };
 
@@ -2093,6 +2366,7 @@ static struct spear_pingroup *spear1310_pingroups[] = {
        &can0_dis_sd_pingroup,
        &can1_dis_sd_pingroup,
        &can1_dis_kbd_pingroup,
+       &pci_pingroup,
        &pcie0_pingroup,
        &pcie1_pingroup,
        &pcie2_pingroup,
@@ -2138,6 +2412,7 @@ static struct spear_function *spear1310_functions[] = {
        &can0_function,
        &can1_function,
        &pci_function,
+       &pcie_function,
        &sata_function,
        &ssp1_function,
        &gpt64_function,
index a0eb057..0606b8c 100644 (file)
@@ -213,7 +213,7 @@ static const struct pinctrl_pin_desc spear1340_pins[] = {
  * Pad multiplexing for making all pads as gpio's. This is done to override the
  * values passed from bootloader and start from scratch.
  */
-static const unsigned pads_as_gpio_pins[] = { 251 };
+static const unsigned pads_as_gpio_pins[] = { 12, 88, 89, 251 };
 static struct spear_muxreg pads_as_gpio_muxreg[] = {
        {
                .reg = PAD_FUNCTION_EN_1,
@@ -1692,7 +1692,43 @@ static struct spear_pingroup clcd_pingroup = {
        .nmodemuxs = ARRAY_SIZE(clcd_modemux),
 };
 
-static const char *const clcd_grps[] = { "clcd_grp" };
+/* Disable cld runtime to save panel damage */
+static struct spear_muxreg clcd_sleep_muxreg[] = {
+       {
+               .reg = PAD_SHARED_IP_EN_1,
+               .mask = ARM_TRACE_MASK | MIPHY_DBG_MASK,
+               .val = 0,
+       }, {
+               .reg = PAD_FUNCTION_EN_5,
+               .mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+               .val = 0x0,
+       }, {
+               .reg = PAD_FUNCTION_EN_6,
+               .mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+               .val = 0x0,
+       }, {
+               .reg = PAD_FUNCTION_EN_7,
+               .mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+               .val = 0x0,
+       },
+};
+
+static struct spear_modemux clcd_sleep_modemux[] = {
+       {
+               .muxregs = clcd_sleep_muxreg,
+               .nmuxregs = ARRAY_SIZE(clcd_sleep_muxreg),
+       },
+};
+
+static struct spear_pingroup clcd_sleep_pingroup = {
+       .name = "clcd_sleep_grp",
+       .pins = clcd_pins,
+       .npins = ARRAY_SIZE(clcd_pins),
+       .modemuxs = clcd_sleep_modemux,
+       .nmodemuxs = ARRAY_SIZE(clcd_sleep_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp", "clcd_sleep_grp" };
 static struct spear_function clcd_function = {
        .name = "clcd",
        .groups = clcd_grps,
@@ -1893,6 +1929,7 @@ static struct spear_pingroup *spear1340_pingroups[] = {
        &sdhci_pingroup,
        &cf_pingroup,
        &xd_pingroup,
+       &clcd_sleep_pingroup,
        &clcd_pingroup,
        &arm_trace_pingroup,
        &miphy_dbg_pingroup,
index 020b1e0..ca47b0e 100644 (file)
@@ -2240,6 +2240,10 @@ static struct spear_muxreg pwm2_pin_34_muxreg[] = {
                .mask = PMX_SSP_CS_MASK,
                .val = 0,
        }, {
+               .reg = MODE_CONFIG_REG,
+               .mask = PMX_PWM_MASK,
+               .val = PMX_PWM_MASK,
+       }, {
                .reg = IP_SEL_PAD_30_39_REG,
                .mask = PMX_PL_34_MASK,
                .val = PMX_PWM2_PL_34_VAL,
@@ -2956,9 +2960,9 @@ static struct spear_function mii2_function = {
 };
 
 /* Pad multiplexing for cadence mii 1_2 as smii or rmii device */
-static const unsigned smii0_1_pins[] = { 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
+static const unsigned rmii0_1_pins[] = { 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
        21, 22, 23, 24, 25, 26, 27 };
-static const unsigned rmii0_1_pins[] = { 10, 11, 21, 22, 23, 24, 25, 26, 27 };
+static const unsigned smii0_1_pins[] = { 10, 11, 21, 22, 23, 24, 25, 26, 27 };
 static struct spear_muxreg mii0_1_muxreg[] = {
        {
                .reg = PMX_CONFIG_REG,
index 31f4434..7860b36 100644 (file)
@@ -15,6 +15,7 @@
 #include "pinctrl-spear.h"
 
 /* pad mux declarations */
+#define PMX_PWM_MASK           (1 << 16)
 #define PMX_FIRDA_MASK         (1 << 14)
 #define PMX_I2C_MASK           (1 << 13)
 #define PMX_SSP_CS_MASK                (1 << 12)
index c17ae22..0c6fcb4 100644 (file)
@@ -401,7 +401,7 @@ EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
 /**
  * rio_map_inb_region -- Map inbound memory region.
  * @mport: Master port.
- * @lstart: physical address of memory region to be mapped
+ * @local: physical address of memory region to be mapped
  * @rbase: RIO base address assigned to this window
  * @size: Size of the memory region
  * @rflags: Flags for mapping.
index 5c4829c..e872c8b 100644 (file)
@@ -1381,22 +1381,14 @@ struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
 }
 EXPORT_SYMBOL_GPL(regulator_get_exclusive);
 
-/**
- * regulator_put - "free" the regulator source
- * @regulator: regulator source
- *
- * Note: drivers must ensure that all regulator_enable calls made on this
- * regulator source are balanced by regulator_disable calls prior to calling
- * this function.
- */
-void regulator_put(struct regulator *regulator)
+/* Locks held by regulator_put() */
+static void _regulator_put(struct regulator *regulator)
 {
        struct regulator_dev *rdev;
 
        if (regulator == NULL || IS_ERR(regulator))
                return;
 
-       mutex_lock(&regulator_list_mutex);
        rdev = regulator->rdev;
 
        debugfs_remove_recursive(regulator->debugfs);
@@ -1412,6 +1404,20 @@ void regulator_put(struct regulator *regulator)
        rdev->exclusive = 0;
 
        module_put(rdev->owner);
+}
+
+/**
+ * regulator_put - "free" the regulator source
+ * @regulator: regulator source
+ *
+ * Note: drivers must ensure that all regulator_enable calls made on this
+ * regulator source are balanced by regulator_disable calls prior to calling
+ * this function.
+ */
+void regulator_put(struct regulator *regulator)
+{
+       mutex_lock(&regulator_list_mutex);
+       _regulator_put(regulator);
        mutex_unlock(&regulator_list_mutex);
 }
 EXPORT_SYMBOL_GPL(regulator_put);
@@ -1974,7 +1980,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
        if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
                ret = regulator_get_voltage(regulator);
                if (ret >= 0)
-                       return (min_uV >= ret && ret <= max_uV);
+                       return (min_uV <= ret && ret <= max_uV);
                else
                        return ret;
        }
@@ -3365,7 +3371,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
                if (ret != 0) {
                        rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
                                 config->ena_gpio, ret);
-                       goto clean;
+                       goto wash;
                }
 
                rdev->ena_gpio = config->ena_gpio;
@@ -3445,10 +3451,11 @@ unset_supplies:
 
 scrub:
        if (rdev->supply)
-               regulator_put(rdev->supply);
+               _regulator_put(rdev->supply);
        if (rdev->ena_gpio)
                gpio_free(rdev->ena_gpio);
        kfree(rdev->constraints);
+wash:
        device_unregister(&rdev->dev);
        /* device core frees rdev */
        rdev = ERR_PTR(ret);
index 9ffb6d5..4ed343e 100644 (file)
@@ -44,7 +44,6 @@
 #define RAW3215_NR_CCWS            3
 #define RAW3215_TIMEOUT            HZ/10     /* time for delayed output */
 
-#define RAW3215_FIXED      1         /* 3215 console device is not be freed */
 #define RAW3215_WORKING            4         /* set if a request is being worked on */
 #define RAW3215_THROTTLED   8        /* set if reading is disabled */
 #define RAW3215_STOPPED            16        /* set if writing is disabled */
@@ -339,8 +338,10 @@ static void raw3215_wakeup(unsigned long data)
        struct tty_struct *tty;
 
        tty = tty_port_tty_get(&raw->port);
-       tty_wakeup(tty);
-       tty_kref_put(tty);
+       if (tty) {
+               tty_wakeup(tty);
+               tty_kref_put(tty);
+       }
 }
 
 /*
@@ -629,8 +630,7 @@ static void raw3215_shutdown(struct raw3215_info *raw)
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;
 
-       if (!(raw->port.flags & ASYNC_INITIALIZED) ||
-                       (raw->flags & RAW3215_FIXED))
+       if (!(raw->port.flags & ASYNC_INITIALIZED))
                return;
        /* Wait for outstanding requests, then free irq */
        spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
@@ -926,8 +926,6 @@ static int __init con3215_init(void)
        dev_set_drvdata(&cdev->dev, raw);
        cdev->handler = raw3215_irq;
 
-       raw->flags |= RAW3215_FIXED;
-
        /* Request the console irq */
        if (raw3215_startup(raw) != 0) {
                raw3215_free_info(raw);
index 33bb4d8..4af3dfe 100644 (file)
@@ -112,9 +112,6 @@ extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
 extern void css_reiterate_subchannels(void);
 void css_update_ssd_info(struct subchannel *sch);
 
-#define __MAX_SUBCHANNEL 65535
-#define __MAX_SSID 3
-
 struct channel_subsystem {
        u8 cssid;
        int valid;
index fc916f5..fd3143c 100644 (file)
@@ -1424,7 +1424,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
        }
        if (device_is_disconnected(cdev))
                return IO_SCH_REPROBE;
-       if (cdev->online)
+       if (cdev->online && !cdev->private->flags.resuming)
                return IO_SCH_VERIFY;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return IO_SCH_UNREG_ATTACH;
@@ -1469,12 +1469,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
                rc = 0;
                goto out_unlock;
        case IO_SCH_VERIFY:
-               if (cdev->private->flags.resuming == 1) {
-                       if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
-                               ccw_device_set_notoper(cdev);
-                               break;
-                       }
-               }
                /* Trigger path verification. */
                io_subchannel_verify(sch);
                rc = 0;
index 199bc67..65d13e3 100644 (file)
@@ -125,8 +125,7 @@ int idset_is_empty(struct idset *set)
 
 void idset_add_set(struct idset *to, struct idset *from)
 {
-       int len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
-                     __BITOPS_WORDS(from->num_ssid * from->num_id));
+       int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id);
 
        bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
 }
index 3e25d31..4d6ba00 100644 (file)
@@ -2942,13 +2942,33 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
        QETH_DBF_TEXT(SETUP, 2, "qipasscb");
 
        cmd = (struct qeth_ipa_cmd *) data;
+
+       switch (cmd->hdr.return_code) {
+       case IPA_RC_NOTSUPP:
+       case IPA_RC_L2_UNSUPPORTED_CMD:
+               QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
+               card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
+               card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
+               return -0;
+       default:
+               if (cmd->hdr.return_code) {
+                       QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
+                                               "rc=%d\n",
+                                               dev_name(&card->gdev->dev),
+                                               cmd->hdr.return_code);
+                       return 0;
+               }
+       }
+
        if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
                card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
                card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
-       } else {
+       } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
                card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
                card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
-       }
+       } else
+               QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
+                                       "\n", dev_name(&card->gdev->dev));
        QETH_DBF_TEXT(SETUP, 2, "suppenbl");
        QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported);
        QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled);
index e67e025..fddb626 100644 (file)
@@ -626,10 +626,13 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
        QETH_DBF_TEXT(SETUP, 2, "doL2init");
        QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
 
-       rc = qeth_query_setadapterparms(card);
-       if (rc) {
-               QETH_DBF_MESSAGE(2, "could not query adapter parameters on "
-                       "device %s: x%x\n", CARD_BUS_ID(card), rc);
+       if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+               rc = qeth_query_setadapterparms(card);
+               if (rc) {
+                       QETH_DBF_MESSAGE(2, "could not query adapter "
+                               "parameters on device %s: x%x\n",
+                               CARD_BUS_ID(card), rc);
+               }
        }
 
        if (card->info.type == QETH_CARD_TYPE_IQD ||
@@ -676,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                return -ERESTARTSYS;
        }
        rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
-       if (!rc)
+       if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
                rc = qeth_l2_send_setmac(card, addr->sa_data);
        return rc ? -EINVAL : 0;
 }
index c1bafc3..9594ab6 100644 (file)
@@ -1972,7 +1972,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
                                                                      frame_index,
                                                                      (void **)&frame_buffer);
 
-                       sci_controller_copy_sata_response(&ireq->stp.req,
+                       sci_controller_copy_sata_response(&ireq->stp.rsp,
                                                               frame_header,
                                                               frame_buffer);
 
index b191dd5..71fddbc 100644 (file)
@@ -1294,26 +1294,19 @@ static struct scsi_host_template qpti_template = {
 static const struct of_device_id qpti_match[];
 static int __devinit qpti_sbus_probe(struct platform_device *op)
 {
-       const struct of_device_id *match;
-       struct scsi_host_template *tpnt;
        struct device_node *dp = op->dev.of_node;
        struct Scsi_Host *host;
        struct qlogicpti *qpti;
        static int nqptis;
        const char *fcode;
 
-       match = of_match_device(qpti_match, &op->dev);
-       if (!match)
-               return -EINVAL;
-       tpnt = match->data;
-
        /* Sometimes Antares cards come up not completely
         * setup, and we get a report of a zero IRQ.
         */
        if (op->archdata.irqs[0] == 0)
                return -ENODEV;
 
-       host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti));
+       host = scsi_host_alloc(&qpti_template, sizeof(struct qlogicpti));
        if (!host)
                return -ENOMEM;
 
@@ -1445,19 +1438,15 @@ static int __devexit qpti_sbus_remove(struct platform_device *op)
 static const struct of_device_id qpti_match[] = {
        {
                .name = "ptisp",
-               .data = &qpti_template,
        },
        {
                .name = "PTI,ptisp",
-               .data = &qpti_template,
        },
        {
                .name = "QLGC,isp",
-               .data = &qpti_template,
        },
        {
                .name = "SUNW,isp",
-               .data = &qpti_template,
        },
        {},
 };
index 2936b44..2c0d0ec 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/async.h>
+#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -1062,6 +1063,50 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
 EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
 
 /**
+ * scsi_report_opcode - Find out if a given command opcode is supported
+ * @sdev:      scsi device to query
+ * @buffer:    scratch buffer (must be at least 20 bytes long)
+ * @len:       length of buffer
+ * @opcode:    opcode for command to look up
+ *
+ * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
+ * opcode. Returns 0 if RSOC fails or if the command opcode is
+ * unsupported. Returns 1 if the device claims to support the command.
+ */
+int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
+                      unsigned int len, unsigned char opcode)
+{
+       unsigned char cmd[16];
+       struct scsi_sense_hdr sshdr;
+       int result;
+
+       if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
+               return 0;
+
+       memset(cmd, 0, 16);
+       cmd[0] = MAINTENANCE_IN;
+       cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
+       cmd[2] = 1;             /* One command format */
+       cmd[3] = opcode;
+       put_unaligned_be32(len, &cmd[6]);
+       memset(buffer, 0, len);
+
+       result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
+                                 &sshdr, 30 * HZ, 3, NULL);
+
+       if (result && scsi_sense_valid(&sshdr) &&
+           sshdr.sense_key == ILLEGAL_REQUEST &&
+           (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
+               return 0;
+
+       if ((buffer[1] & 3) == 3) /* Command supported */
+               return 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(scsi_report_opcode);
+
+/**
  * scsi_device_get  -  get an additional reference to a scsi_device
  * @sdev:      device to get a reference to
  *
index da36a3a..9032e91 100644 (file)
@@ -900,11 +900,23 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                                action = ACTION_FAIL;
                                error = -EILSEQ;
                        /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
-                       } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
-                                  (cmd->cmnd[0] == UNMAP ||
-                                   cmd->cmnd[0] == WRITE_SAME_16 ||
-                                   cmd->cmnd[0] == WRITE_SAME)) {
-                               description = "Discard failure";
+                       } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
+                               switch (cmd->cmnd[0]) {
+                               case UNMAP:
+                                       description = "Discard failure";
+                                       break;
+                               case WRITE_SAME:
+                               case WRITE_SAME_16:
+                                       if (cmd->cmnd[1] & 0x8)
+                                               description = "Discard failure";
+                                       else
+                                               description =
+                                                       "Write same failure";
+                                       break;
+                               default:
+                                       description = "Invalid command failure";
+                                       break;
+                               }
                                action = ACTION_FAIL;
                                error = -EREMOTEIO;
                        } else
index 12f6fdf..352bc77 100644 (file)
@@ -99,6 +99,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
 #endif
 
 static void sd_config_discard(struct scsi_disk *, unsigned int);
+static void sd_config_write_same(struct scsi_disk *);
 static int  sd_revalidate_disk(struct gendisk *);
 static void sd_unlock_native_capacity(struct gendisk *disk);
 static int  sd_probe(struct device *);
@@ -395,6 +396,45 @@ sd_store_max_medium_access_timeouts(struct device *dev,
        return err ? err : count;
 }
 
+static ssize_t
+sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+       return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks);
+}
+
+static ssize_t
+sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
+{
+       struct scsi_disk *sdkp = to_scsi_disk(dev);
+       struct scsi_device *sdp = sdkp->device;
+       unsigned long max;
+       int err;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
+       if (sdp->type != TYPE_DISK)
+               return -EINVAL;
+
+       err = kstrtoul(buf, 10, &max);
+
+       if (err)
+               return err;
+
+       if (max == 0)
+               sdp->no_write_same = 1;
+       else if (max <= SD_MAX_WS16_BLOCKS)
+               sdkp->max_ws_blocks = max;
+
+       sd_config_write_same(sdkp);
+
+       return count;
+}
+
 static struct device_attribute sd_disk_attrs[] = {
        __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
               sd_store_cache_type),
@@ -410,6 +450,8 @@ static struct device_attribute sd_disk_attrs[] = {
        __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
        __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode,
               sd_store_provisioning_mode),
+       __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR,
+              sd_show_write_same_blocks, sd_store_write_same_blocks),
        __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR,
               sd_show_max_medium_access_timeouts,
               sd_store_max_medium_access_timeouts),
@@ -561,19 +603,23 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
                return;
 
        case SD_LBP_UNMAP:
-               max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff);
+               max_blocks = min_not_zero(sdkp->max_unmap_blocks,
+                                         (u32)SD_MAX_WS16_BLOCKS);
                break;
 
        case SD_LBP_WS16:
-               max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff);
+               max_blocks = min_not_zero(sdkp->max_ws_blocks,
+                                         (u32)SD_MAX_WS16_BLOCKS);
                break;
 
        case SD_LBP_WS10:
-               max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+               max_blocks = min_not_zero(sdkp->max_ws_blocks,
+                                         (u32)SD_MAX_WS10_BLOCKS);
                break;
 
        case SD_LBP_ZERO:
-               max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+               max_blocks = min_not_zero(sdkp->max_ws_blocks,
+                                         (u32)SD_MAX_WS10_BLOCKS);
                q->limits.discard_zeroes_data = 1;
                break;
        }
@@ -583,29 +629,26 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 }
 
 /**
- * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device
+ * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device
  * @sdp: scsi device to operate one
  * @rq: Request to prepare
  *
  * Will issue either UNMAP or WRITE SAME(16) depending on preference
  * indicated by target device.
  **/
-static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
+static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
 {
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
-       struct bio *bio = rq->bio;
-       sector_t sector = bio->bi_sector;
-       unsigned int nr_sectors = bio_sectors(bio);
+       sector_t sector = blk_rq_pos(rq);
+       unsigned int nr_sectors = blk_rq_sectors(rq);
+       unsigned int nr_bytes = blk_rq_bytes(rq);
        unsigned int len;
        int ret;
        char *buf;
        struct page *page;
 
-       if (sdkp->device->sector_size == 4096) {
-               sector >>= 3;
-               nr_sectors >>= 3;
-       }
-
+       sector >>= ilog2(sdp->sector_size) - 9;
+       nr_sectors >>= ilog2(sdp->sector_size) - 9;
        rq->timeout = SD_TIMEOUT;
 
        memset(rq->cmd, 0, rq->cmd_len);
@@ -660,6 +703,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
        blk_add_request_payload(rq, page, len);
        ret = scsi_setup_blk_pc_cmnd(sdp, rq);
        rq->buffer = page_address(page);
+       rq->__data_len = nr_bytes;
 
 out:
        if (ret != BLKPREP_OK) {
@@ -669,6 +713,83 @@ out:
        return ret;
 }
 
+static void sd_config_write_same(struct scsi_disk *sdkp)
+{
+       struct request_queue *q = sdkp->disk->queue;
+       unsigned int logical_block_size = sdkp->device->sector_size;
+       unsigned int blocks = 0;
+
+       if (sdkp->device->no_write_same) {
+               sdkp->max_ws_blocks = 0;
+               goto out;
+       }
+
+       /* Some devices can not handle block counts above 0xffff despite
+        * supporting WRITE SAME(16). Consequently we default to 64k
+        * blocks per I/O unless the device explicitly advertises a
+        * bigger limit.
+        */
+       if (sdkp->max_ws_blocks == 0)
+               sdkp->max_ws_blocks = SD_MAX_WS10_BLOCKS;
+
+       if (sdkp->ws16 || sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
+               blocks = min_not_zero(sdkp->max_ws_blocks,
+                                     (u32)SD_MAX_WS16_BLOCKS);
+       else
+               blocks = min_not_zero(sdkp->max_ws_blocks,
+                                     (u32)SD_MAX_WS10_BLOCKS);
+
+out:
+       blk_queue_max_write_same_sectors(q, blocks * (logical_block_size >> 9));
+}
+
+/**
+ * sd_setup_write_same_cmnd - write the same data to multiple blocks
+ * @sdp: scsi device to operate one
+ * @rq: Request to prepare
+ *
+ * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on
+ * preference indicated by target device.
+ **/
+static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
+{
+       struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+       struct bio *bio = rq->bio;
+       sector_t sector = blk_rq_pos(rq);
+       unsigned int nr_sectors = blk_rq_sectors(rq);
+       unsigned int nr_bytes = blk_rq_bytes(rq);
+       int ret;
+
+       if (sdkp->device->no_write_same)
+               return BLKPREP_KILL;
+
+       BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size);
+
+       sector >>= ilog2(sdp->sector_size) - 9;
+       nr_sectors >>= ilog2(sdp->sector_size) - 9;
+
+       rq->__data_len = sdp->sector_size;
+       rq->timeout = SD_WRITE_SAME_TIMEOUT;
+       memset(rq->cmd, 0, rq->cmd_len);
+
+       if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) {
+               rq->cmd_len = 16;
+               rq->cmd[0] = WRITE_SAME_16;
+               put_unaligned_be64(sector, &rq->cmd[2]);
+               put_unaligned_be32(nr_sectors, &rq->cmd[10]);
+       } else {
+               rq->cmd_len = 10;
+               rq->cmd[0] = WRITE_SAME;
+               put_unaligned_be32(sector, &rq->cmd[2]);
+               put_unaligned_be16(nr_sectors, &rq->cmd[7]);
+       }
+
+       ret = scsi_setup_blk_pc_cmnd(sdp, rq);
+       rq->__data_len = nr_bytes;
+
+       return ret;
+}
+
 static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
 {
        rq->timeout = SD_FLUSH_TIMEOUT;
@@ -712,7 +833,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
         * block PC requests to make life easier.
         */
        if (rq->cmd_flags & REQ_DISCARD) {
-               ret = scsi_setup_discard_cmnd(sdp, rq);
+               ret = sd_setup_discard_cmnd(sdp, rq);
+               goto out;
+       } else if (rq->cmd_flags & REQ_WRITE_SAME) {
+               ret = sd_setup_write_same_cmnd(sdp, rq);
                goto out;
        } else if (rq->cmd_flags & REQ_FLUSH) {
                ret = scsi_setup_flush_cmnd(sdp, rq);
@@ -1482,12 +1606,21 @@ static int sd_done(struct scsi_cmnd *SCpnt)
        unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
        struct scsi_sense_hdr sshdr;
        struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
+       struct request *req = SCpnt->request;
        int sense_valid = 0;
        int sense_deferred = 0;
        unsigned char op = SCpnt->cmnd[0];
+       unsigned char unmap = SCpnt->cmnd[1] & 8;
 
-       if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result)
-               scsi_set_resid(SCpnt, 0);
+       if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) {
+               if (!result) {
+                       good_bytes = blk_rq_bytes(req);
+                       scsi_set_resid(SCpnt, 0);
+               } else {
+                       good_bytes = 0;
+                       scsi_set_resid(SCpnt, blk_rq_bytes(req));
+               }
+       }
 
        if (result) {
                sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
@@ -1536,9 +1669,25 @@ static int sd_done(struct scsi_cmnd *SCpnt)
                if (sshdr.asc == 0x10)  /* DIX: Host detected corruption */
                        good_bytes = sd_completed_bytes(SCpnt);
                /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
-               if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
-                   (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME))
-                       sd_config_discard(sdkp, SD_LBP_DISABLE);
+               if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
+                       switch (op) {
+                       case UNMAP:
+                               sd_config_discard(sdkp, SD_LBP_DISABLE);
+                               break;
+                       case WRITE_SAME_16:
+                       case WRITE_SAME:
+                               if (unmap)
+                                       sd_config_discard(sdkp, SD_LBP_DISABLE);
+                               else {
+                                       sdkp->device->no_write_same = 1;
+                                       sd_config_write_same(sdkp);
+
+                                       good_bytes = 0;
+                                       req->__data_len = blk_rq_bytes(req);
+                                       req->cmd_flags |= REQ_QUIET;
+                               }
+                       }
+               }
                break;
        default:
                break;
@@ -2374,9 +2523,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
        if (buffer[3] == 0x3c) {
                unsigned int lba_count, desc_count;
 
-               sdkp->max_ws_blocks =
-                       (u32) min_not_zero(get_unaligned_be64(&buffer[36]),
-                                          (u64)0xffffffff);
+               sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
 
                if (!sdkp->lbpme)
                        goto out;
@@ -2469,6 +2616,13 @@ static void sd_read_block_provisioning(struct scsi_disk *sdkp)
        kfree(buffer);
 }
 
+static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+       if (scsi_report_opcode(sdkp->device, buffer, SD_BUF_SIZE,
+                              WRITE_SAME_16))
+               sdkp->ws16 = 1;
+}
+
 static int sd_try_extended_inquiry(struct scsi_device *sdp)
 {
        /*
@@ -2528,6 +2682,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
                sd_read_write_protect_flag(sdkp, buffer);
                sd_read_cache_type(sdkp, buffer);
                sd_read_app_tag_own(sdkp, buffer);
+               sd_read_write_same(sdkp, buffer);
        }
 
        sdkp->first_scan = 0;
@@ -2545,6 +2700,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
        blk_queue_flush(sdkp->disk->queue, flush);
 
        set_capacity(disk, sdkp->capacity);
+       sd_config_write_same(sdkp);
        kfree(buffer);
 
  out:
index 47c52a6..74a1e4c 100644 (file)
@@ -14,6 +14,7 @@
 #define SD_TIMEOUT             (30 * HZ)
 #define SD_MOD_TIMEOUT         (75 * HZ)
 #define SD_FLUSH_TIMEOUT       (60 * HZ)
+#define SD_WRITE_SAME_TIMEOUT  (120 * HZ)
 
 /*
  * Number of allowed retries
@@ -39,6 +40,11 @@ enum {
 };
 
 enum {
+       SD_MAX_WS10_BLOCKS = 0xffff,
+       SD_MAX_WS16_BLOCKS = 0x7fffff,
+};
+
+enum {
        SD_LBP_FULL = 0,        /* Full logical block provisioning */
        SD_LBP_UNMAP,           /* Use UNMAP command */
        SD_LBP_WS16,            /* Use WRITE SAME(16) with UNMAP bit */
@@ -77,6 +83,7 @@ struct scsi_disk {
        unsigned        lbpws : 1;
        unsigned        lbpws10 : 1;
        unsigned        lbpvpd : 1;
+       unsigned        ws16 : 1;
 };
 #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
 
index f2ffd96..d0cafd6 100644 (file)
@@ -51,12 +51,10 @@ enum android_alarm_return_flags {
 #define ANDROID_ALARM_WAIT                  _IO('a', 1)
 
 #define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size)
-#define ALARM_IOR(c, type, size)            _IOR('a', (c) | ((type) << 4), size)
-
 /* Set alarm */
 #define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec)
 #define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOR(4, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec)
 #define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec)
 #define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0)))
 #define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4)
index a5dec1c..13ee53b 100644 (file)
@@ -424,7 +424,6 @@ static void hvc_hangup(struct tty_struct *tty)
 {
        struct hvc_struct *hp = tty->driver_data;
        unsigned long flags;
-       int temp_open_count;
 
        if (!hp)
                return;
@@ -444,7 +443,6 @@ static void hvc_hangup(struct tty_struct *tty)
                return;
        }
 
-       temp_open_count = hp->port.count;
        hp->port.count = 0;
        spin_unlock_irqrestore(&hp->port.lock, flags);
        tty_port_tty_set(&hp->port, NULL);
@@ -453,11 +451,6 @@ static void hvc_hangup(struct tty_struct *tty)
 
        if (hp->ops->notifier_hangup)
                hp->ops->notifier_hangup(hp, hp->data);
-
-       while(temp_open_count) {
-               --temp_open_count;
-               tty_port_put(&hp->port);
-       }
 }
 
 /*
index 2bc28a5..1ab1d2c 100644 (file)
@@ -1239,6 +1239,7 @@ static int __devexit max310x_remove(struct spi_device *spi)
 static const struct spi_device_id max310x_id_table[] = {
        { "max3107",    MAX310X_TYPE_MAX3107 },
        { "max3108",    MAX310X_TYPE_MAX3108 },
+       { }
 };
 MODULE_DEVICE_TABLE(spi, max310x_id_table);
 
index 1e741bc..f034716 100644 (file)
@@ -2151,8 +2151,15 @@ EXPORT_SYMBOL_GPL(usb_bus_start_enum);
 irqreturn_t usb_hcd_irq (int irq, void *__hcd)
 {
        struct usb_hcd          *hcd = __hcd;
+       unsigned long           flags;
        irqreturn_t             rc;
 
+       /* IRQF_DISABLED doesn't work correctly with shared IRQs
+        * when the first handler doesn't use it.  So let's just
+        * assume it's never used.
+        */
+       local_irq_save(flags);
+
        if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
                rc = IRQ_NONE;
        else if (hcd->driver->irq(hcd) == IRQ_NONE)
@@ -2160,6 +2167,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
        else
                rc = IRQ_HANDLED;
 
+       local_irq_restore(flags);
        return rc;
 }
 EXPORT_SYMBOL_GPL(usb_hcd_irq);
@@ -2347,6 +2355,14 @@ static int usb_hcd_request_irqs(struct usb_hcd *hcd,
        int retval;
 
        if (hcd->driver->irq) {
+
+               /* IRQF_DISABLED doesn't work as advertised when used together
+                * with IRQF_SHARED. As usb_hcd_irq() will always disable
+                * interrupts we can remove it here.
+                */
+               if (irqflags & IRQF_SHARED)
+                       irqflags &= ~IRQF_DISABLED;
+
                snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
                                hcd->driver->description, hcd->self.busnum);
                retval = request_irq(irqnum, &usb_hcd_irq, irqflags,
index e426ad6..4bfa78a 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/usb/ehci_def.h>
 #include <linux/delay.h>
 #include <linux/serial_core.h>
+#include <linux/kconfig.h>
 #include <linux/kgdb.h>
 #include <linux/kthread.h>
 #include <asm/io.h>
@@ -614,12 +615,6 @@ err:
        return -ENODEV;
 }
 
-int dbgp_external_startup(struct usb_hcd *hcd)
-{
-       return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup();
-}
-EXPORT_SYMBOL_GPL(dbgp_external_startup);
-
 static int ehci_reset_port(int port)
 {
        u32 portsc;
@@ -979,6 +974,7 @@ struct console early_dbgp_console = {
        .index =        -1,
 };
 
+#if IS_ENABLED(CONFIG_USB_EHCI_HCD)
 int dbgp_reset_prep(struct usb_hcd *hcd)
 {
        int ret = xen_dbgp_reset_prep(hcd);
@@ -1007,6 +1003,13 @@ int dbgp_reset_prep(struct usb_hcd *hcd)
 }
 EXPORT_SYMBOL_GPL(dbgp_reset_prep);
 
+int dbgp_external_startup(struct usb_hcd *hcd)
+{
+       return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup();
+}
+EXPORT_SYMBOL_GPL(dbgp_external_startup);
+#endif /* USB_EHCI_HCD */
+
 #ifdef CONFIG_KGDB
 
 static char kgdbdbgp_buf[DBGP_MAX_PACKET];
index 6458764..4ec3c0d 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/ctype.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
+#include <linux/if_vlan.h>
 
 #include "u_ether.h"
 
@@ -295,7 +296,7 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
                while (skb2) {
                        if (status < 0
                                        || ETH_HLEN > skb2->len
-                                       || skb2->len > ETH_FRAME_LEN) {
+                                       || skb2->len > VLAN_ETH_FRAME_LEN) {
                                dev->net->stats.rx_errors++;
                                dev->net->stats.rx_length_errors++;
                                DBG(dev, "rx length %d\n", skb2->len);
index ca75965..aa0f328 100644 (file)
@@ -113,7 +113,7 @@ static int ehci_hcd_ls1x_probe(struct platform_device *pdev)
                goto err_put_hcd;
        }
 
-       ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+       ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
        if (ret)
                goto err_put_hcd;
 
index 84201cd..41e378f 100644 (file)
@@ -56,7 +56,7 @@ static int ohci_xls_probe_internal(const struct hc_driver *driver,
                goto err3;
        }
 
-       retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+       retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
        if (retval != 0)
                goto err4;
        return retval;
index d0b87e7..b6b84da 100644 (file)
@@ -707,11 +707,12 @@ static void rxstate(struct musb *musb, struct musb_request *req)
                fifo_count = musb_readw(epio, MUSB_RXCOUNT);
 
                /*
-                *  use mode 1 only if we expect data of at least ep packet_sz
-                *  and have not yet received a short packet
+                * Enable Mode 1 on RX transfers only when short_not_ok flag
+                * is set. Currently short_not_ok flag is set only from
+                * file_storage and f_mass_storage drivers
                 */
-               if ((request->length - request->actual >= musb_ep->packet_sz) &&
-                       (fifo_count >= musb_ep->packet_sz))
+
+               if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
                        use_mode_1 = 1;
                else
                        use_mode_1 = 0;
@@ -727,6 +728,27 @@ static void rxstate(struct musb *musb, struct musb_request *req)
                                c = musb->dma_controller;
                                channel = musb_ep->dma;
 
+       /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
+        * mode 0 only. So we do not get endpoint interrupts due to DMA
+        * completion. We only get interrupts from DMA controller.
+        *
+        * We could operate in DMA mode 1 if we knew the size of the tranfer
+        * in advance. For mass storage class, request->length = what the host
+        * sends, so that'd work.  But for pretty much everything else,
+        * request->length is routinely more than what the host sends. For
+        * most these gadgets, end of is signified either by a short packet,
+        * or filling the last byte of the buffer.  (Sending extra data in
+        * that last pckate should trigger an overflow fault.)  But in mode 1,
+        * we don't get DMA completion interrupt for short packets.
+        *
+        * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
+        * to get endpoint interrupt on every DMA req, but that didn't seem
+        * to work reliably.
+        *
+        * REVISIT an updated g_file_storage can set req->short_not_ok, which
+        * then becomes usable as a runtime "use mode 1" hint...
+        */
+
                                /* Experimental: Mode1 works with mass storage use cases */
                                if (use_mode_1) {
                                        csr |= MUSB_RXCSR_AUTOCLEAR;
index d62a91f..0e62f50 100644 (file)
@@ -65,7 +65,7 @@ static int __devinit ux500_probe(struct platform_device *pdev)
        struct platform_device          *musb;
        struct ux500_glue               *glue;
        struct clk                      *clk;
-
+       int                             musbid;
        int                             ret = -ENOMEM;
 
        glue = kzalloc(sizeof(*glue), GFP_KERNEL);
index d8c8a42..6223062 100644 (file)
@@ -58,7 +58,7 @@ config USB_ULPI_VIEWPORT
 
 config TWL4030_USB
        tristate "TWL4030 USB Transceiver Driver"
-       depends on TWL4030_CORE && REGULATOR_TWL4030
+       depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
        select USB_OTG_UTILS
        help
          Enable this to support the USB OTG transceiver on TWL4030
@@ -68,7 +68,7 @@ config TWL4030_USB
 
 config TWL6030_USB
        tristate "TWL6030 USB Transceiver Driver"
-       depends on TWL4030_CORE && OMAP_USB2
+       depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS
        select USB_OTG_UTILS
        help
          Enable this to support the USB OTG transceiver on TWL6030
index 7179b0c..cff8dd5 100644 (file)
@@ -2430,7 +2430,7 @@ static void keyspan_release(struct usb_serial *serial)
 static int keyspan_port_probe(struct usb_serial_port *port)
 {
        struct usb_serial *serial = port->serial;
-       struct keyspan_port_private *s_priv;
+       struct keyspan_serial_private *s_priv;
        struct keyspan_port_private *p_priv;
        const struct keyspan_device_details *d_details;
        struct callbacks *cback;
@@ -2445,7 +2445,6 @@ static int keyspan_port_probe(struct usb_serial_port *port)
        if (!p_priv)
                return -ENOMEM;
 
-       s_priv = usb_get_serial_data(port->serial);
        p_priv->device_details = d_details;
 
        /* Setup values for the various callback routines */
index 5dee7d6..edc64bb 100644 (file)
@@ -158,6 +158,7 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED        0x8001
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED        0x9000
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED        0x9001
+#define NOVATELWIRELESS_PRODUCT_E362           0x9010
 #define NOVATELWIRELESS_PRODUCT_G1             0xA001
 #define NOVATELWIRELESS_PRODUCT_G1_M           0xA002
 #define NOVATELWIRELESS_PRODUCT_G2             0xA010
@@ -193,6 +194,9 @@ static void option_instat_callback(struct urb *urb);
 #define DELL_PRODUCT_5730_MINICARD_TELUS       0x8181
 #define DELL_PRODUCT_5730_MINICARD_VZW         0x8182
 
+#define DELL_PRODUCT_5800_MINICARD_VZW         0x8195  /* Novatel E362 */
+#define DELL_PRODUCT_5800_V2_MINICARD_VZW      0x8196  /* Novatel E362 */
+
 #define KYOCERA_VENDOR_ID                      0x0c88
 #define KYOCERA_PRODUCT_KPC650                 0x17da
 #define KYOCERA_PRODUCT_KPC680                 0x180a
@@ -283,6 +287,7 @@ static void option_instat_callback(struct urb *urb);
 /* ALCATEL PRODUCTS */
 #define ALCATEL_VENDOR_ID                      0x1bbb
 #define ALCATEL_PRODUCT_X060S_X200             0x0000
+#define ALCATEL_PRODUCT_X220_X500D             0x0017
 
 #define PIRELLI_VENDOR_ID                      0x1266
 #define PIRELLI_PRODUCT_C100_1                 0x1002
@@ -706,6 +711,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
        /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
        { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
 
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -728,6 +734,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) },      /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
        { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) },       /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
        { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) },         /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+       { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },   /* ADU-E100, ADU-310 */
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1157,6 +1165,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
          .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
        },
+       { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
        { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
        { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
index 61a73ad..a3e9c09 100644 (file)
@@ -455,9 +455,6 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
        struct usb_serial *serial = port->serial;
        struct urb *urb;
 
-       if (endpoint == -1)
-               return NULL;    /* endpoint not needed */
-
        urb = usb_alloc_urb(0, GFP_KERNEL);     /* No ISO */
        if (urb == NULL) {
                dev_dbg(&serial->interface->dev,
@@ -489,6 +486,9 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
        init_usb_anchor(&portdata->delayed);
 
        for (i = 0; i < N_IN_URB; i++) {
+               if (!port->bulk_in_size)
+                       break;
+
                buffer = (u8 *)__get_free_page(GFP_KERNEL);
                if (!buffer)
                        goto bail_out_error;
@@ -502,8 +502,8 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
        }
 
        for (i = 0; i < N_OUT_URB; i++) {
-               if (port->bulk_out_endpointAddress == -1)
-                       continue;
+               if (!port->bulk_out_size)
+                       break;
 
                buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
                if (!buffer)
index a3d5436..92f35ab 100644 (file)
@@ -186,6 +186,12 @@ static int slave_configure(struct scsi_device *sdev)
                /* Some devices don't handle VPD pages correctly */
                sdev->skip_vpd_pages = 1;
 
+               /* Do not attempt to use REPORT SUPPORTED OPERATION CODES */
+               sdev->no_report_opcodes = 1;
+
+               /* Do not attempt to use WRITE SAME */
+               sdev->no_write_same = 1;
+
                /* Some disks return the total number of blocks in response
                 * to READ CAPACITY rather than the highest block number.
                 * If this device makes that mistake, tell the sd driver. */
index d64ac38..bee9284 100644 (file)
@@ -365,11 +365,20 @@ struct platform_device *dsi_get_dsidev_from_id(int module)
        struct omap_dss_output *out;
        enum omap_dss_output_id id;
 
-       id = module == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
+       switch (module) {
+       case 0:
+               id = OMAP_DSS_OUTPUT_DSI1;
+               break;
+       case 1:
+               id = OMAP_DSS_OUTPUT_DSI2;
+               break;
+       default:
+               return NULL;
+       }
 
        out = omap_dss_get_output(id);
 
-       return out->pdev;
+       return out ? out->pdev : NULL;
 }
 
 static inline void dsi_write_reg(struct platform_device *dsidev,
index 2ab1c3e..5f6eea8 100644 (file)
@@ -697,11 +697,15 @@ static int dss_get_clocks(void)
 
        dss.dss_clk = clk;
 
-       clk = clk_get(NULL, dss.feat->clk_name);
-       if (IS_ERR(clk)) {
-               DSSERR("Failed to get %s\n", dss.feat->clk_name);
-               r = PTR_ERR(clk);
-               goto err;
+       if (dss.feat->clk_name) {
+               clk = clk_get(NULL, dss.feat->clk_name);
+               if (IS_ERR(clk)) {
+                       DSSERR("Failed to get %s\n", dss.feat->clk_name);
+                       r = PTR_ERR(clk);
+                       goto err;
+               }
+       } else {
+               clk = NULL;
        }
 
        dss.dpll4_m4_ck = clk;
@@ -805,10 +809,10 @@ static int __init dss_init_features(struct device *dev)
 
        if (cpu_is_omap24xx())
                src = &omap24xx_dss_feats;
-       else if (cpu_is_omap34xx())
-               src = &omap34xx_dss_feats;
        else if (cpu_is_omap3630())
                src = &omap3630_dss_feats;
+       else if (cpu_is_omap34xx())
+               src = &omap34xx_dss_feats;
        else if (cpu_is_omap44xx())
                src = &omap44xx_dss_feats;
        else if (soc_is_omap54xx())
index a48a7dd..8c9b8b3 100644 (file)
@@ -644,8 +644,10 @@ static void hdmi_dump_regs(struct seq_file *s)
 {
        mutex_lock(&hdmi.lock);
 
-       if (hdmi_runtime_get())
+       if (hdmi_runtime_get()) {
+               mutex_unlock(&hdmi.lock);
                return;
+       }
 
        hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s);
        hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s);
index 606b89f..d630b26 100644 (file)
@@ -787,7 +787,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
 
        case OMAPFB_WAITFORVSYNC:
                DBG("ioctl WAITFORVSYNC\n");
-               if (!display && !display->output && !display->output->manager) {
+               if (!display || !display->output || !display->output->manager) {
                        r = -EINVAL;
                        break;
                }
index 1e8659c..809b0de 100644 (file)
@@ -225,8 +225,10 @@ EXPORT_SYMBOL_GPL(register_virtio_device);
 
 void unregister_virtio_device(struct virtio_device *dev)
 {
+       int index = dev->index; /* save for after device release */
+
        device_unregister(&dev->dev);
-       ida_simple_remove(&virtio_index_ida, dev->index);
+       ida_simple_remove(&virtio_index_ida, index);
 }
 EXPORT_SYMBOL_GPL(unregister_virtio_device);
 
index 0e86370..7435470 100644 (file)
@@ -2,6 +2,7 @@ ifneq ($(CONFIG_ARM),y)
 obj-y  += manage.o balloon.o
 obj-$(CONFIG_HOTPLUG_CPU)              += cpu_hotplug.o
 endif
+obj-$(CONFIG_X86)                      += fallback.o
 obj-y  += grant-table.o features.o events.o
 obj-y  += xenbus/
 
index 912ac81..0be4df3 100644 (file)
@@ -1395,10 +1395,10 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
 
+       irq_enter();
 #ifdef CONFIG_X86
        exit_idle();
 #endif
-       irq_enter();
 
        __xen_evtchn_do_upcall();
 
diff --git a/drivers/xen/fallback.c b/drivers/xen/fallback.c
new file mode 100644 (file)
index 0000000..0ef7c4d
--- /dev/null
@@ -0,0 +1,80 @@
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <asm/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+int xen_event_channel_op_compat(int cmd, void *arg)
+{
+       struct evtchn_op op;
+       int rc;
+
+       op.cmd = cmd;
+       memcpy(&op.u, arg, sizeof(op.u));
+       rc = _hypercall1(int, event_channel_op_compat, &op);
+
+       switch (cmd) {
+       case EVTCHNOP_close:
+       case EVTCHNOP_send:
+       case EVTCHNOP_bind_vcpu:
+       case EVTCHNOP_unmask:
+               /* no output */
+               break;
+
+#define COPY_BACK(eop) \
+       case EVTCHNOP_##eop: \
+               memcpy(arg, &op.u.eop, sizeof(op.u.eop)); \
+               break
+
+       COPY_BACK(bind_interdomain);
+       COPY_BACK(bind_virq);
+       COPY_BACK(bind_pirq);
+       COPY_BACK(status);
+       COPY_BACK(alloc_unbound);
+       COPY_BACK(bind_ipi);
+#undef COPY_BACK
+
+       default:
+               WARN_ON(rc != -ENOSYS);
+               break;
+       }
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(xen_event_channel_op_compat);
+
+int HYPERVISOR_physdev_op_compat(int cmd, void *arg)
+{
+       struct physdev_op op;
+       int rc;
+
+       op.cmd = cmd;
+       memcpy(&op.u, arg, sizeof(op.u));
+       rc = _hypercall1(int, physdev_op_compat, &op);
+
+       switch (cmd) {
+       case PHYSDEVOP_IRQ_UNMASK_NOTIFY:
+       case PHYSDEVOP_set_iopl:
+       case PHYSDEVOP_set_iobitmap:
+       case PHYSDEVOP_apic_write:
+               /* no output */
+               break;
+
+#define COPY_BACK(pop, fld) \
+       case PHYSDEVOP_##pop: \
+               memcpy(arg, &op.u.fld, sizeof(op.u.fld)); \
+               break
+
+       COPY_BACK(irq_status_query, irq_status_query);
+       COPY_BACK(apic_read, apic_op);
+       COPY_BACK(ASSIGN_VECTOR, irq_op);
+#undef COPY_BACK
+
+       default:
+               WARN_ON(rc != -ENOSYS);
+               break;
+       }
+
+       return rc;
+}
index 8adb9cc..71f5c45 100644 (file)
@@ -361,13 +361,13 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
        down_write(&mm->mmap_sem);
 
        vma = find_vma(mm, m.addr);
-       ret = -EINVAL;
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops ||
            (m.addr != vma->vm_start) ||
            ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
            !privcmd_enforce_singleshot_mapping(vma)) {
                up_write(&mm->mmap_sem);
+               ret = -EINVAL;
                goto out;
        }
 
@@ -383,12 +383,16 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
        up_write(&mm->mmap_sem);
 
-       if (state.global_error && (version == 1)) {
-               /* Write back errors in second pass. */
-               state.user_mfn = (xen_pfn_t *)m.arr;
-               state.err      = err_array;
-               ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-                                    &pagelist, mmap_return_errors_v1, &state);
+       if (version == 1) {
+               if (state.global_error) {
+                       /* Write back errors in second pass. */
+                       state.user_mfn = (xen_pfn_t *)m.arr;
+                       state.err      = err_array;
+                       ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+                                            &pagelist, mmap_return_errors_v1, &state);
+               } else
+                       ret = 0;
+
        } else if (version == 2) {
                ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
                if (ret)
index fc783e2..0fb15bb 100644 (file)
@@ -225,6 +225,13 @@ sid_to_str(struct cifs_sid *sidptr, char *sidstr)
 }
 
 static void
+cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+       dst->num_subauth = min_t(u8, src->num_subauth, NUM_SUBAUTHS);
+}
+
+static void
 id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
                struct cifs_sid_id **psidid, char *typestr)
 {
@@ -248,7 +255,7 @@ id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
                }
        }
 
-       memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
+       cifs_copy_sid(&(*psidid)->sid, sidptr);
        (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
        (*psidid)->refcount = 0;
 
@@ -354,7 +361,7 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
         * any fields of the node after a reference is put .
         */
        if (test_bit(SID_ID_MAPPED, &psidid->state)) {
-               memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
+               cifs_copy_sid(ssid, &psidid->sid);
                psidid->time = jiffies; /* update ts for accessing */
                goto id_sid_out;
        }
@@ -370,14 +377,14 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
                if (IS_ERR(sidkey)) {
                        rc = -EINVAL;
                        cFYI(1, "%s: Can't map and id to a SID", __func__);
+               } else if (sidkey->datalen < sizeof(struct cifs_sid)) {
+                       rc = -EIO;
+                       cFYI(1, "%s: Downcall contained malformed key "
+                               "(datalen=%hu)", __func__, sidkey->datalen);
                } else {
                        lsid = (struct cifs_sid *)sidkey->payload.data;
-                       memcpy(&psidid->sid, lsid,
-                               sidkey->datalen < sizeof(struct cifs_sid) ?
-                               sidkey->datalen : sizeof(struct cifs_sid));
-                       memcpy(ssid, &psidid->sid,
-                               sidkey->datalen < sizeof(struct cifs_sid) ?
-                               sidkey->datalen : sizeof(struct cifs_sid));
+                       cifs_copy_sid(&psidid->sid, lsid);
+                       cifs_copy_sid(ssid, &psidid->sid);
                        set_bit(SID_ID_MAPPED, &psidid->state);
                        key_put(sidkey);
                        kfree(psidid->sidstr);
@@ -396,7 +403,7 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
                        return rc;
                }
                if (test_bit(SID_ID_MAPPED, &psidid->state))
-                       memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
+                       cifs_copy_sid(ssid, &psidid->sid);
                else
                        rc = -EINVAL;
        }
@@ -675,8 +682,6 @@ int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
 static void copy_sec_desc(const struct cifs_ntsd *pntsd,
                                struct cifs_ntsd *pnntsd, __u32 sidsoffset)
 {
-       int i;
-
        struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
        struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
 
@@ -692,26 +697,14 @@ static void copy_sec_desc(const struct cifs_ntsd *pntsd,
        owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
                                le32_to_cpu(pntsd->osidoffset));
        nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
-
-       nowner_sid_ptr->revision = owner_sid_ptr->revision;
-       nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
-       for (i = 0; i < 6; i++)
-               nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
-       for (i = 0; i < 5; i++)
-               nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];
+       cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
 
        /* copy group sid */
        group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
                                le32_to_cpu(pntsd->gsidoffset));
        ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
                                        sizeof(struct cifs_sid));
-
-       ngroup_sid_ptr->revision = group_sid_ptr->revision;
-       ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
-       for (i = 0; i < 6; i++)
-               ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
-       for (i = 0; i < 5; i++)
-               ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i];
+       cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
 
        return;
 }
@@ -1120,8 +1113,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
                                kfree(nowner_sid_ptr);
                                return rc;
                        }
-                       memcpy(owner_sid_ptr, nowner_sid_ptr,
-                                       sizeof(struct cifs_sid));
+                       cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr);
                        kfree(nowner_sid_ptr);
                        *aclflag = CIFS_ACL_OWNER;
                }
@@ -1139,8 +1131,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
                                kfree(ngroup_sid_ptr);
                                return rc;
                        }
-                       memcpy(group_sid_ptr, ngroup_sid_ptr,
-                                       sizeof(struct cifs_sid));
+                       cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr);
                        kfree(ngroup_sid_ptr);
                        *aclflag = CIFS_ACL_GROUP;
                }
index 7c0a812..d3671f2 100644 (file)
@@ -398,7 +398,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
         * in network traffic in the other paths.
         */
        if (!(oflags & O_CREAT)) {
-               struct dentry *res = cifs_lookup(inode, direntry, 0);
+               struct dentry *res;
+
+               /*
+                * Check for hashed negative dentry. We have already revalidated
+                * the dentry and it is fine. No need to perform another lookup.
+                */
+               if (!d_unhashed(direntry))
+                       return -ENOENT;
+
+               res = cifs_lookup(inode, direntry, 0);
                if (IS_ERR(res))
                        return PTR_ERR(res);
 
index da72250..cd96649 100644 (file)
@@ -346,7 +346,7 @@ static inline struct epitem *ep_item_from_epqueue(poll_table *p)
 /* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
 static inline int ep_op_has_event(int op)
 {
-       return op == EPOLL_CTL_ADD || op == EPOLL_CTL_MOD;
+       return op != EPOLL_CTL_DEL;
 }
 
 /* Initialize the poll safe wake up structure */
@@ -676,34 +676,6 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
        return 0;
 }
 
-/*
- * Disables a "struct epitem" in the eventpoll set. Returns -EBUSY if the item
- * had no event flags set, indicating that another thread may be currently
- * handling that item's events (in the case that EPOLLONESHOT was being
- * used). Otherwise a zero result indicates that the item has been disabled
- * from receiving events. A disabled item may be re-enabled via
- * EPOLL_CTL_MOD. Must be called with "mtx" held.
- */
-static int ep_disable(struct eventpoll *ep, struct epitem *epi)
-{
-       int result = 0;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ep->lock, flags);
-       if (epi->event.events & ~EP_PRIVATE_BITS) {
-               if (ep_is_linked(&epi->rdllink))
-                       list_del_init(&epi->rdllink);
-               /* Ensure ep_poll_callback will not add epi back onto ready
-                  list: */
-               epi->event.events &= EP_PRIVATE_BITS;
-               }
-       else
-               result = -EBUSY;
-       spin_unlock_irqrestore(&ep->lock, flags);
-
-       return result;
-}
-
 static void ep_free(struct eventpoll *ep)
 {
        struct rb_node *rbp;
@@ -1048,6 +1020,8 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
        rb_insert_color(&epi->rbn, &ep->rbr);
 }
 
+
+
 #define PATH_ARR_SIZE 5
 /*
  * These are the number paths of length 1 to 5, that we are allowing to emanate
@@ -1813,12 +1787,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                } else
                        error = -ENOENT;
                break;
-       case EPOLL_CTL_DISABLE:
-               if (epi)
-                       error = ep_disable(ep, epi);
-               else
-                       error = -ENOENT;
-               break;
        }
        mutex_unlock(&ep->mtx);
 
index 7320a66..22548f5 100644 (file)
@@ -2101,8 +2101,9 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
        end = start + (range->len >> sb->s_blocksize_bits) - 1;
        minlen = range->minlen >> sb->s_blocksize_bits;
 
-       if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)) ||
-           unlikely(start >= max_blks))
+       if (minlen > EXT3_BLOCKS_PER_GROUP(sb) ||
+           start >= max_blks ||
+           range->len < sb->s_blocksize)
                return -EINVAL;
        if (end >= max_blks)
                end = max_blks - 1;
index 708d997..7cb71b9 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -685,7 +685,6 @@ void do_close_on_exec(struct files_struct *files)
        struct fdtable *fdt;
 
        /* exec unshares first */
-       BUG_ON(atomic_read(&files->count) != 1);
        spin_lock(&files->file_lock);
        for (i = 0; ; i++) {
                unsigned long set;
index 0def050..e056b4c 100644 (file)
@@ -516,15 +516,13 @@ static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
                struct gfs2_holder i_gh;
                int error;
 
-               gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
-               error = gfs2_glock_nq(&i_gh);
-               if (error == 0) {
-                       file_accessed(file);
-                       gfs2_glock_dq(&i_gh);
-               }
-               gfs2_holder_uninit(&i_gh);
+               error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
+                                          &i_gh);
                if (error)
                        return error;
+               /* grab lock to update inode */
+               gfs2_glock_dq_uninit(&i_gh);
+               file_accessed(file);
        }
        vma->vm_ops = &gfs2_vm_ops;
 
@@ -677,10 +675,8 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        size_t writesize = iov_length(iov, nr_segs);
        struct dentry *dentry = file->f_dentry;
        struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
-       struct gfs2_sbd *sdp;
        int ret;
 
-       sdp = GFS2_SB(file->f_mapping->host);
        ret = gfs2_rs_alloc(ip);
        if (ret)
                return ret;
index 8ff95a2..9ceccb1 100644 (file)
@@ -393,12 +393,10 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
        struct gfs2_meta_header *mh;
        struct gfs2_trans *tr;
 
-       lock_buffer(bd->bd_bh);
-       gfs2_log_lock(sdp);
        tr = current->journal_info;
        tr->tr_touched = 1;
        if (!list_empty(&bd->bd_list))
-               goto out;
+               return;
        set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
        mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
@@ -414,9 +412,6 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
        sdp->sd_log_num_buf++;
        list_add(&bd->bd_list, &sdp->sd_log_le_buf);
        tr->tr_num_buf_new++;
-out:
-       gfs2_log_unlock(sdp);
-       unlock_buffer(bd->bd_bh);
 }
 
 static void gfs2_check_magic(struct buffer_head *bh)
@@ -621,7 +616,6 @@ static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 
 static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 {
-       struct gfs2_log_descriptor *ld;
        struct gfs2_meta_header *mh;
        unsigned int offset;
        struct list_head *head = &sdp->sd_log_le_revoke;
@@ -634,7 +628,6 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 
        length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
        page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
-       ld = page_address(page);
        offset = sizeof(struct gfs2_log_descriptor);
 
        list_for_each_entry(bd, head, bd_list) {
@@ -777,12 +770,10 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
        struct address_space *mapping = bd->bd_bh->b_page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);
 
-       lock_buffer(bd->bd_bh);
-       gfs2_log_lock(sdp);
        if (tr)
                tr->tr_touched = 1;
        if (!list_empty(&bd->bd_list))
-               goto out;
+               return;
        set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
        if (gfs2_is_jdata(ip)) {
@@ -793,9 +784,6 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
        } else {
                list_add_tail(&bd->bd_list, &sdp->sd_log_le_ordered);
        }
-out:
-       gfs2_log_unlock(sdp);
-       unlock_buffer(bd->bd_bh);
 }
 
 /**
index 40c4b0d..c5af8e1 100644 (file)
@@ -497,8 +497,11 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
        struct gfs2_quota_data **qd;
        int error;
 
-       if (ip->i_res == NULL)
-               gfs2_rs_alloc(ip);
+       if (ip->i_res == NULL) {
+               error = gfs2_rs_alloc(ip);
+               if (error)
+                       return error;
+       }
 
        qd = ip->i_res->rs_qa_qd;
 
index 3cc402c..38fe18f 100644 (file)
@@ -553,7 +553,6 @@ void gfs2_free_clones(struct gfs2_rgrpd *rgd)
  */
 int gfs2_rs_alloc(struct gfs2_inode *ip)
 {
-       int error = 0;
        struct gfs2_blkreserv *res;
 
        if (ip->i_res)
@@ -561,7 +560,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip)
 
        res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
        if (!res)
-               error = -ENOMEM;
+               return -ENOMEM;
 
        RB_CLEAR_NODE(&res->rs_node);
 
@@ -571,7 +570,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip)
        else
                ip->i_res = res;
        up_write(&ip->i_rw_mutex);
-       return error;
+       return 0;
 }
 
 static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
@@ -1263,7 +1262,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
        int ret = 0;
        u64 amt;
        u64 trimmed = 0;
+       u64 start, end, minlen;
        unsigned int x;
+       unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -1271,19 +1272,25 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
 
-       if (argp == NULL) {
-               r.start = 0;
-               r.len = ULLONG_MAX;
-               r.minlen = 0;
-       } else if (copy_from_user(&r, argp, sizeof(r)))
+       if (copy_from_user(&r, argp, sizeof(r)))
                return -EFAULT;
 
        ret = gfs2_rindex_update(sdp);
        if (ret)
                return ret;
 
-       rgd = gfs2_blk2rgrpd(sdp, r.start, 0);
-       rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0);
+       start = r.start >> bs_shift;
+       end = start + (r.len >> bs_shift);
+       minlen = max_t(u64, r.minlen,
+                      q->limits.discard_granularity) >> bs_shift;
+
+       rgd = gfs2_blk2rgrpd(sdp, start, 0);
+       rgd_end = gfs2_blk2rgrpd(sdp, end - 1, 0);
+
+       if (end <= start ||
+           minlen > sdp->sd_max_rg_data ||
+           start > rgd_end->rd_data0 + rgd_end->rd_data)
+               return -EINVAL;
 
        while (1) {
 
@@ -1295,7 +1302,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
                        /* Trim each bitmap in the rgrp */
                        for (x = 0; x < rgd->rd_length; x++) {
                                struct gfs2_bitmap *bi = rgd->rd_bits + x;
-                               ret = gfs2_rgrp_send_discards(sdp, rgd->rd_data0, NULL, bi, r.minlen, &amt);
+                               ret = gfs2_rgrp_send_discards(sdp,
+                                               rgd->rd_data0, NULL, bi, minlen,
+                                               &amt);
                                if (ret) {
                                        gfs2_glock_dq_uninit(&gh);
                                        goto out;
@@ -1324,7 +1333,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
 
 out:
        r.len = trimmed << 9;
-       if (argp && copy_to_user(argp, &r, sizeof(r)))
+       if (copy_to_user(argp, &r, sizeof(r)))
                return -EFAULT;
 
        return ret;
index bc73726..d648867 100644 (file)
@@ -810,7 +810,8 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
                        return;
                }
                need_unlock = 1;
-       }
+       } else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
+               return;
 
        if (current->journal_info == NULL) {
                ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
index adbd278..4136270 100644 (file)
@@ -155,14 +155,22 @@ void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta)
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_bufdata *bd;
 
+       lock_buffer(bh);
+       gfs2_log_lock(sdp);
        bd = bh->b_private;
        if (bd)
                gfs2_assert(sdp, bd->bd_gl == gl);
        else {
+               gfs2_log_unlock(sdp);
+               unlock_buffer(bh);
                gfs2_attach_bufdata(gl, bh, meta);
                bd = bh->b_private;
+               lock_buffer(bh);
+               gfs2_log_lock(sdp);
        }
        lops_add(sdp, bd);
+       gfs2_log_unlock(sdp);
+       unlock_buffer(bh);
 }
 
 void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
index 60ef3fb..1506673 100644 (file)
@@ -138,33 +138,39 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
        struct page *pg;
        struct inode *inode = mapping->host;
        struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+       struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+       struct jffs2_raw_inode ri;
+       uint32_t alloc_len = 0;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        uint32_t pageofs = index << PAGE_CACHE_SHIFT;
        int ret = 0;
 
+       jffs2_dbg(1, "%s()\n", __func__);
+
+       if (pageofs > inode->i_size) {
+               ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+                                         ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+               if (ret)
+                       return ret;
+       }
+
+       mutex_lock(&f->sem);
        pg = grab_cache_page_write_begin(mapping, index, flags);
-       if (!pg)
+       if (!pg) {
+               if (alloc_len)
+                       jffs2_complete_reservation(c);
+               mutex_unlock(&f->sem);
                return -ENOMEM;
+       }
        *pagep = pg;
 
-       jffs2_dbg(1, "%s()\n", __func__);
-
-       if (pageofs > inode->i_size) {
+       if (alloc_len) {
                /* Make new hole frag from old EOF to new page */
-               struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
-               struct jffs2_raw_inode ri;
                struct jffs2_full_dnode *fn;
-               uint32_t alloc_len;
 
                jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
                          (unsigned int)inode->i_size, pageofs);
 
-               ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
-                                         ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
-               if (ret)
-                       goto out_page;
-
-               mutex_lock(&f->sem);
                memset(&ri, 0, sizeof(ri));
 
                ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -191,7 +197,6 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                if (IS_ERR(fn)) {
                        ret = PTR_ERR(fn);
                        jffs2_complete_reservation(c);
-                       mutex_unlock(&f->sem);
                        goto out_page;
                }
                ret = jffs2_add_full_dnode_to_inode(c, f, fn);
@@ -206,12 +211,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                        jffs2_mark_node_obsolete(c, fn->raw);
                        jffs2_free_full_dnode(fn);
                        jffs2_complete_reservation(c);
-                       mutex_unlock(&f->sem);
                        goto out_page;
                }
                jffs2_complete_reservation(c);
                inode->i_size = pageofs;
-               mutex_unlock(&f->sem);
        }
 
        /*
@@ -220,18 +223,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
         * case of a short-copy.
         */
        if (!PageUptodate(pg)) {
-               mutex_lock(&f->sem);
                ret = jffs2_do_readpage_nolock(inode, pg);
-               mutex_unlock(&f->sem);
                if (ret)
                        goto out_page;
        }
+       mutex_unlock(&f->sem);
        jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
        return ret;
 
 out_page:
        unlock_page(pg);
        page_cache_release(pg);
+       mutex_unlock(&f->sem);
        return ret;
 }
 
index f35794b..a506360 100644 (file)
@@ -21,6 +21,7 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
                        if ((old->path.mnt == new->path.mnt) &&
                            (old->path.dentry == new->path.dentry))
                                return true;
+                       break;
                case (FSNOTIFY_EVENT_NONE):
                        return true;
                default:
index 721d692..6fcaeb8 100644 (file)
@@ -258,7 +258,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
        if (ret)
                goto out_close_fd;
 
-       fd_install(fd, f);
+       if (fd != FAN_NOFD)
+               fd_install(fd, f);
        return fanotify_event_metadata.event_len;
 
 out_close_fd:
index 144a967..3c231ad 100644 (file)
@@ -873,6 +873,113 @@ static const struct file_operations proc_environ_operations = {
        .release        = mem_release,
 };
 
+static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
+                           loff_t *ppos)
+{
+       struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+       char buffer[PROC_NUMBUF];
+       int oom_adj = OOM_ADJUST_MIN;
+       size_t len;
+       unsigned long flags;
+
+       if (!task)
+               return -ESRCH;
+       if (lock_task_sighand(task, &flags)) {
+               if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
+                       oom_adj = OOM_ADJUST_MAX;
+               else
+                       oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
+                                 OOM_SCORE_ADJ_MAX;
+               unlock_task_sighand(task, &flags);
+       }
+       put_task_struct(task);
+       len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj);
+       return simple_read_from_buffer(buf, count, ppos, buffer, len);
+}
+
+static ssize_t oom_adj_write(struct file *file, const char __user *buf,
+                            size_t count, loff_t *ppos)
+{
+       struct task_struct *task;
+       char buffer[PROC_NUMBUF];
+       int oom_adj;
+       unsigned long flags;
+       int err;
+
+       memset(buffer, 0, sizeof(buffer));
+       if (count > sizeof(buffer) - 1)
+               count = sizeof(buffer) - 1;
+       if (copy_from_user(buffer, buf, count)) {
+               err = -EFAULT;
+               goto out;
+       }
+
+       err = kstrtoint(strstrip(buffer), 0, &oom_adj);
+       if (err)
+               goto out;
+       if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) &&
+            oom_adj != OOM_DISABLE) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       task = get_proc_task(file->f_path.dentry->d_inode);
+       if (!task) {
+               err = -ESRCH;
+               goto out;
+       }
+
+       task_lock(task);
+       if (!task->mm) {
+               err = -EINVAL;
+               goto err_task_lock;
+       }
+
+       if (!lock_task_sighand(task, &flags)) {
+               err = -ESRCH;
+               goto err_task_lock;
+       }
+
+       /*
+        * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
+        * value is always attainable.
+        */
+       if (oom_adj == OOM_ADJUST_MAX)
+               oom_adj = OOM_SCORE_ADJ_MAX;
+       else
+               oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
+
+       if (oom_adj < task->signal->oom_score_adj &&
+           !capable(CAP_SYS_RESOURCE)) {
+               err = -EACCES;
+               goto err_sighand;
+       }
+
+       /*
+        * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
+        * /proc/pid/oom_score_adj instead.
+        */
+       printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
+                 current->comm, task_pid_nr(current), task_pid_nr(task),
+                 task_pid_nr(task));
+
+       task->signal->oom_score_adj = oom_adj;
+       trace_oom_score_adj_update(task);
+err_sighand:
+       unlock_task_sighand(task, &flags);
+err_task_lock:
+       task_unlock(task);
+       put_task_struct(task);
+out:
+       return err < 0 ? err : count;
+}
+
+static const struct file_operations proc_oom_adj_operations = {
+       .read           = oom_adj_read,
+       .write          = oom_adj_write,
+       .llseek         = generic_file_llseek,
+};
+
 static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
                                        size_t count, loff_t *ppos)
 {
@@ -2598,6 +2705,7 @@ static const struct pid_entry tgid_base_stuff[] = {
        REG("cgroup",  S_IRUGO, proc_cgroup_operations),
 #endif
        INF("oom_score",  S_IRUGO, proc_oom_score),
+       REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adj_operations),
        REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
        REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
@@ -2964,6 +3072,7 @@ static const struct pid_entry tid_base_stuff[] = {
        REG("cgroup",  S_IRUGO, proc_cgroup_operations),
 #endif
        INF("oom_score", S_IRUGO, proc_oom_score),
+       REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adj_operations),
        REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
        REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
index a40da07..947fbe0 100644 (file)
@@ -161,6 +161,7 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c)
 
        while (s < e) {
                unsigned long flags;
+               u64 id;
 
                if (c > psinfo->bufsize)
                        c = psinfo->bufsize;
@@ -172,7 +173,7 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c)
                        spin_lock_irqsave(&psinfo->buf_lock, flags);
                }
                memcpy(psinfo->buf, s, c);
-               psinfo->write(PSTORE_TYPE_CONSOLE, 0, NULL, 0, c, psinfo);
+               psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, c, psinfo);
                spin_unlock_irqrestore(&psinfo->buf_lock, flags);
                s += c;
                c = e - s;
index f27f01a..d83736f 100644 (file)
@@ -1782,8 +1782,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 
        BUG_ON(!th->t_trans_id);
 
-       dquot_initialize(inode);
+       reiserfs_write_unlock(inode->i_sb);
        err = dquot_alloc_inode(inode);
+       reiserfs_write_lock(inode->i_sb);
        if (err)
                goto out_end_trans;
        if (!dir->i_nlink) {
@@ -1979,8 +1980,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 
       out_end_trans:
        journal_end(th, th->t_super, th->t_blocks_allocated);
+       reiserfs_write_unlock(inode->i_sb);
        /* Drop can be outside and it needs more credits so it's better to have it outside */
        dquot_drop(inode);
+       reiserfs_write_lock(inode->i_sb);
        inode->i_flags |= S_NOQUOTA;
        make_bad_inode(inode);
 
@@ -3103,10 +3106,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
        /* must be turned off for recursive notify_change calls */
        ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
 
-       depth = reiserfs_write_lock_once(inode->i_sb);
        if (is_quota_modification(inode, attr))
                dquot_initialize(inode);
-
+       depth = reiserfs_write_lock_once(inode->i_sb);
        if (attr->ia_valid & ATTR_SIZE) {
                /* version 2 items will be caught by the s_maxbytes check
                 ** done for us in vmtruncate
@@ -3170,7 +3172,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
                error = journal_begin(&th, inode->i_sb, jbegin_count);
                if (error)
                        goto out;
+               reiserfs_write_unlock_once(inode->i_sb, depth);
                error = dquot_transfer(inode, attr);
+               depth = reiserfs_write_lock_once(inode->i_sb);
                if (error) {
                        journal_end(&th, inode->i_sb, jbegin_count);
                        goto out;
index f8afa4b..2f40a4c 100644 (file)
@@ -1968,7 +1968,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
                       key2type(&(key->on_disk_key)));
 #endif
 
+       reiserfs_write_unlock(inode->i_sb);
        retval = dquot_alloc_space_nodirty(inode, pasted_size);
+       reiserfs_write_lock(inode->i_sb);
        if (retval) {
                pathrelse(search_path);
                return retval;
@@ -2061,9 +2063,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
                               "reiserquota insert_item(): allocating %u id=%u type=%c",
                               quota_bytes, inode->i_uid, head2type(ih));
 #endif
+               reiserfs_write_unlock(inode->i_sb);
                /* We can't dirty inode here. It would be immediately written but
                 * appropriate stat item isn't inserted yet... */
                retval = dquot_alloc_space_nodirty(inode, quota_bytes);
+               reiserfs_write_lock(inode->i_sb);
                if (retval) {
                        pathrelse(path);
                        return retval;
index 1078ae1..418bdc3 100644 (file)
@@ -298,7 +298,9 @@ static int finish_unfinished(struct super_block *s)
                        retval = remove_save_link_only(s, &save_link_key, 0);
                        continue;
                }
+               reiserfs_write_unlock(s);
                dquot_initialize(inode);
+               reiserfs_write_lock(s);
 
                if (truncate && S_ISDIR(inode->i_mode)) {
                        /* We got a truncate request for a dir which is impossible.
@@ -1335,7 +1337,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                                kfree(qf_names[i]);
 #endif
                err = -EINVAL;
-               goto out_err;
+               goto out_unlock;
        }
 #ifdef CONFIG_QUOTA
        handle_quota_files(s, qf_names, &qfmt);
@@ -1379,7 +1381,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        if (blocks) {
                err = reiserfs_resize(s, blocks);
                if (err != 0)
-                       goto out_err;
+                       goto out_unlock;
        }
 
        if (*mount_flags & MS_RDONLY) {
@@ -1389,9 +1391,15 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                        /* it is read-only already */
                        goto out_ok;
 
+               /*
+                * Drop write lock. Quota will retake it when needed and lock
+                * ordering requires calling dquot_suspend() without it.
+                */
+               reiserfs_write_unlock(s);
                err = dquot_suspend(s, -1);
                if (err < 0)
                        goto out_err;
+               reiserfs_write_lock(s);
 
                /* try to remount file system with read-only permissions */
                if (sb_umount_state(rs) == REISERFS_VALID_FS
@@ -1401,7 +1409,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
 
                err = journal_begin(&th, s, 10);
                if (err)
-                       goto out_err;
+                       goto out_unlock;
 
                /* Mounting a rw partition read-only. */
                reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1416,7 +1424,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
 
                if (reiserfs_is_journal_aborted(journal)) {
                        err = journal->j_errno;
-                       goto out_err;
+                       goto out_unlock;
                }
 
                handle_data_mode(s, mount_options);
@@ -1425,7 +1433,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                s->s_flags &= ~MS_RDONLY;       /* now it is safe to call journal_begin */
                err = journal_begin(&th, s, 10);
                if (err)
-                       goto out_err;
+                       goto out_unlock;
 
                /* Mount a partition which is read-only, read-write */
                reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1442,10 +1450,16 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        SB_JOURNAL(s)->j_must_wait = 1;
        err = journal_end(&th, s, 10);
        if (err)
-               goto out_err;
+               goto out_unlock;
 
        if (!(*mount_flags & MS_RDONLY)) {
+               /*
+                * Drop write lock. Quota will retake it when needed and lock
+                * ordering requires calling dquot_resume() without it.
+                */
+               reiserfs_write_unlock(s);
                dquot_resume(s, -1);
+               reiserfs_write_lock(s);
                finish_unfinished(s);
                reiserfs_xattr_init(s, *mount_flags);
        }
@@ -1455,9 +1469,10 @@ out_ok:
        reiserfs_write_unlock(s);
        return 0;
 
+out_unlock:
+       reiserfs_write_unlock(s);
 out_err:
        kfree(new_opts);
-       reiserfs_write_unlock(s);
        return err;
 }
 
@@ -2095,13 +2110,15 @@ static int reiserfs_write_dquot(struct dquot *dquot)
                          REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
        if (ret)
                goto out;
+       reiserfs_write_unlock(dquot->dq_sb);
        ret = dquot_commit(dquot);
+       reiserfs_write_lock(dquot->dq_sb);
        err =
            journal_end(&th, dquot->dq_sb,
                        REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
        if (!ret && err)
                ret = err;
-      out:
+out:
        reiserfs_write_unlock(dquot->dq_sb);
        return ret;
 }
@@ -2117,13 +2134,15 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
                          REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
        if (ret)
                goto out;
+       reiserfs_write_unlock(dquot->dq_sb);
        ret = dquot_acquire(dquot);
+       reiserfs_write_lock(dquot->dq_sb);
        err =
            journal_end(&th, dquot->dq_sb,
                        REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
        if (!ret && err)
                ret = err;
-      out:
+out:
        reiserfs_write_unlock(dquot->dq_sb);
        return ret;
 }
@@ -2137,19 +2156,21 @@ static int reiserfs_release_dquot(struct dquot *dquot)
        ret =
            journal_begin(&th, dquot->dq_sb,
                          REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
+       reiserfs_write_unlock(dquot->dq_sb);
        if (ret) {
                /* Release dquot anyway to avoid endless cycle in dqput() */
                dquot_release(dquot);
                goto out;
        }
        ret = dquot_release(dquot);
+       reiserfs_write_lock(dquot->dq_sb);
        err =
            journal_end(&th, dquot->dq_sb,
                        REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
        if (!ret && err)
                ret = err;
-      out:
        reiserfs_write_unlock(dquot->dq_sb);
+out:
        return ret;
 }
 
@@ -2174,11 +2195,13 @@ static int reiserfs_write_info(struct super_block *sb, int type)
        ret = journal_begin(&th, sb, 2);
        if (ret)
                goto out;
+       reiserfs_write_unlock(sb);
        ret = dquot_commit_info(sb, type);
+       reiserfs_write_lock(sb);
        err = journal_end(&th, sb, 2);
        if (!ret && err)
                ret = err;
-      out:
+out:
        reiserfs_write_unlock(sb);
        return ret;
 }
@@ -2203,8 +2226,11 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
        struct reiserfs_transaction_handle th;
        int opt = type == USRQUOTA ? REISERFS_USRQUOTA : REISERFS_GRPQUOTA;
 
-       if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt)))
-               return -EINVAL;
+       reiserfs_write_lock(sb);
+       if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) {
+               err = -EINVAL;
+               goto out;
+       }
 
        /* Quotafile not on the same filesystem? */
        if (path->dentry->d_sb != sb) {
@@ -2246,8 +2272,10 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
                if (err)
                        goto out;
        }
-       err = dquot_quota_on(sb, type, format_id, path);
+       reiserfs_write_unlock(sb);
+       return dquot_quota_on(sb, type, format_id, path);
 out:
+       reiserfs_write_unlock(sb);
        return err;
 }
 
@@ -2320,7 +2348,9 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
                tocopy = sb->s_blocksize - offset < towrite ?
                    sb->s_blocksize - offset : towrite;
                tmp_bh.b_state = 0;
+               reiserfs_write_lock(sb);
                err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE);
+               reiserfs_write_unlock(sb);
                if (err)
                        goto out;
                if (offset || tocopy != sb->s_blocksize)
@@ -2336,10 +2366,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
                flush_dcache_page(bh->b_page);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);
+               reiserfs_write_lock(sb);
                reiserfs_prepare_for_journal(sb, bh, 1);
                journal_mark_dirty(current->journal_info, sb, bh);
                if (!journal_quota)
                        reiserfs_add_ordered_list(inode, bh);
+               reiserfs_write_unlock(sb);
                brelse(bh);
                offset = 0;
                towrite -= tocopy;
index 28ec13a..2dcf3d4 100644 (file)
@@ -681,8 +681,16 @@ int ubifs_find_free_leb_for_idx(struct ubifs_info *c)
        if (!lprops) {
                lprops = ubifs_fast_find_freeable(c);
                if (!lprops) {
-                       ubifs_assert(c->freeable_cnt == 0);
-                       if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
+                       /*
+                        * The first condition means the following: go scan the
+                        * LPT if there are uncategorized lprops, which means
+                        * there may be freeable LEBs there (UBIFS does not
+                        * store the information about freeable LEBs in the
+                        * master node).
+                        */
+                       if (c->in_a_category_cnt != c->main_lebs ||
+                           c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
+                               ubifs_assert(c->freeable_cnt == 0);
                                lprops = scan_for_leb_for_idx(c);
                                if (IS_ERR(lprops)) {
                                        err = PTR_ERR(lprops);
index e5a2a35..46190a7 100644 (file)
@@ -300,8 +300,11 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops,
        default:
                ubifs_assert(0);
        }
+
        lprops->flags &= ~LPROPS_CAT_MASK;
        lprops->flags |= cat;
+       c->in_a_category_cnt += 1;
+       ubifs_assert(c->in_a_category_cnt <= c->main_lebs);
 }
 
 /**
@@ -334,6 +337,9 @@ static void ubifs_remove_from_cat(struct ubifs_info *c,
        default:
                ubifs_assert(0);
        }
+
+       c->in_a_category_cnt -= 1;
+       ubifs_assert(c->in_a_category_cnt >= 0);
 }
 
 /**
index 5486346..d133c27 100644 (file)
@@ -1183,6 +1183,8 @@ struct ubifs_debug_info;
  * @freeable_list: list of freeable non-index LEBs (free + dirty == @leb_size)
  * @frdi_idx_list: list of freeable index LEBs (free + dirty == @leb_size)
  * @freeable_cnt: number of freeable LEBs in @freeable_list
+ * @in_a_category_cnt: count of lprops which are in a certain category, which
+ *                     basically meants that they were loaded from the flash
  *
  * @ltab_lnum: LEB number of LPT's own lprops table
  * @ltab_offs: offset of LPT's own lprops table
@@ -1412,6 +1414,7 @@ struct ubifs_info {
        struct list_head freeable_list;
        struct list_head frdi_idx_list;
        int freeable_cnt;
+       int in_a_category_cnt;
 
        int ltab_lnum;
        int ltab_offs;
index 4f33c32..335206a 100644 (file)
@@ -1866,6 +1866,7 @@ xfs_alloc_fix_freelist(
        /*
         * Initialize the args structure.
         */
+       memset(&targs, 0, sizeof(targs));
        targs.tp = tp;
        targs.mp = mp;
        targs.agbp = agbp;
@@ -2207,7 +2208,7 @@ xfs_alloc_read_agf(
  * group or loop over the allocation groups to find the result.
  */
 int                            /* error */
-__xfs_alloc_vextent(
+xfs_alloc_vextent(
        xfs_alloc_arg_t *args)  /* allocation argument structure */
 {
        xfs_agblock_t   agsize; /* allocation group size */
@@ -2417,46 +2418,6 @@ error0:
        return error;
 }
 
-static void
-xfs_alloc_vextent_worker(
-       struct work_struct      *work)
-{
-       struct xfs_alloc_arg    *args = container_of(work,
-                                               struct xfs_alloc_arg, work);
-       unsigned long           pflags;
-
-       /* we are in a transaction context here */
-       current_set_flags_nested(&pflags, PF_FSTRANS);
-
-       args->result = __xfs_alloc_vextent(args);
-       complete(args->done);
-
-       current_restore_flags_nested(&pflags, PF_FSTRANS);
-}
-
-/*
- * Data allocation requests often come in with little stack to work on. Push
- * them off to a worker thread so there is lots of stack to use. Metadata
- * requests, OTOH, are generally from low stack usage paths, so avoid the
- * context switch overhead here.
- */
-int
-xfs_alloc_vextent(
-       struct xfs_alloc_arg    *args)
-{
-       DECLARE_COMPLETION_ONSTACK(done);
-
-       if (!args->userdata)
-               return __xfs_alloc_vextent(args);
-
-
-       args->done = &done;
-       INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
-       queue_work(xfs_alloc_wq, &args->work);
-       wait_for_completion(&done);
-       return args->result;
-}
-
 /*
  * Free an extent.
  * Just break up the extent address and hand off to xfs_free_ag_extent
index 93be4a6..feacb06 100644 (file)
@@ -120,9 +120,6 @@ typedef struct xfs_alloc_arg {
        char            isfl;           /* set if is freelist blocks - !acctg */
        char            userdata;       /* set if this is user data */
        xfs_fsblock_t   firstblock;     /* io first block allocated */
-       struct completion *done;
-       struct work_struct work;
-       int             result;
 } xfs_alloc_arg_t;
 
 /*
index f1647ca..f7876c6 100644 (file)
@@ -121,6 +121,8 @@ xfs_allocbt_free_block(
        xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
                              XFS_EXTENT_BUSY_SKIP_DISCARD);
        xfs_trans_agbtree_delta(cur->bc_tp, -1);
+
+       xfs_trans_binval(cur->bc_tp, bp);
        return 0;
 }
 
index e562dd4..e57e2da 100644 (file)
@@ -481,11 +481,17 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  *
  * The fix is two passes across the ioend list - one to start writeback on the
  * buffer_heads, and then submit them for I/O on the second pass.
+ *
+ * If @fail is non-zero, it means that we have a situation where some part of
+ * the submission process has failed after we have marked paged for writeback
+ * and unlocked them. In this situation, we need to fail the ioend chain rather
+ * than submit it to IO. This typically only happens on a filesystem shutdown.
  */
 STATIC void
 xfs_submit_ioend(
        struct writeback_control *wbc,
-       xfs_ioend_t             *ioend)
+       xfs_ioend_t             *ioend,
+       int                     fail)
 {
        xfs_ioend_t             *head = ioend;
        xfs_ioend_t             *next;
@@ -506,6 +512,18 @@ xfs_submit_ioend(
                next = ioend->io_list;
                bio = NULL;
 
+               /*
+                * If we are failing the IO now, just mark the ioend with an
+                * error and finish it. This will run IO completion immediately
+                * as there is only one reference to the ioend at this point in
+                * time.
+                */
+               if (fail) {
+                       ioend->io_error = -fail;
+                       xfs_finish_ioend(ioend);
+                       continue;
+               }
+
                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
 
                        if (!bio) {
@@ -1060,7 +1078,18 @@ xfs_vm_writepage(
 
        xfs_start_page_writeback(page, 1, count);
 
-       if (ioend && imap_valid) {
+       /* if there is no IO to be submitted for this page, we are done */
+       if (!ioend)
+               return 0;
+
+       ASSERT(iohead);
+
+       /*
+        * Any errors from this point onwards need tobe reported through the IO
+        * completion path as we have marked the initial page as under writeback
+        * and unlocked it.
+        */
+       if (imap_valid) {
                xfs_off_t               end_index;
 
                end_index = imap.br_startoff + imap.br_blockcount;
@@ -1079,20 +1108,15 @@ xfs_vm_writepage(
                                  wbc, end_index);
        }
 
-       if (iohead) {
-               /*
-                * Reserve log space if we might write beyond the on-disk
-                * inode size.
-                */
-               if (ioend->io_type != XFS_IO_UNWRITTEN &&
-                   xfs_ioend_is_append(ioend)) {
-                       err = xfs_setfilesize_trans_alloc(ioend);
-                       if (err)
-                               goto error;
-               }
 
-               xfs_submit_ioend(wbc, iohead);
-       }
+       /*
+        * Reserve log space if we might write beyond the on-disk inode size.
+        */
+       err = 0;
+       if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
+               err = xfs_setfilesize_trans_alloc(ioend);
+
+       xfs_submit_ioend(wbc, iohead, err);
 
        return 0;
 
index d330111..70eec18 100644 (file)
@@ -1291,6 +1291,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
        leaf2 = blk2->bp->b_addr;
        ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
        ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+       ASSERT(leaf2->hdr.count == 0);
        args = state->args;
 
        trace_xfs_attr_leaf_rebalance(args);
@@ -1361,6 +1362,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
                 * I assert that since all callers pass in an empty
                 * second buffer, this code should never execute.
                 */
+               ASSERT(0);
 
                /*
                 * Figure the total bytes to be added to the destination leaf.
@@ -1422,10 +1424,24 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
                        args->index2 = 0;
                        args->blkno2 = blk2->blkno;
                } else {
+                       /*
+                        * On a double leaf split, the original attr location
+                        * is already stored in blkno2/index2, so don't
+                        * overwrite it overwise we corrupt the tree.
+                        */
                        blk2->index = blk1->index
                                    - be16_to_cpu(leaf1->hdr.count);
-                       args->index = args->index2 = blk2->index;
-                       args->blkno = args->blkno2 = blk2->blkno;
+                       args->index = blk2->index;
+                       args->blkno = blk2->blkno;
+                       if (!state->extravalid) {
+                               /*
+                                * set the new attr location to match the old
+                                * one and let the higher level split code
+                                * decide where in the leaf to place it.
+                                */
+                               args->index2 = blk2->index;
+                               args->blkno2 = blk2->blkno;
+                       }
                }
        } else {
                ASSERT(state->inleaf == 1);
index 848ffa7..83d0cf3 100644 (file)
@@ -2437,6 +2437,7 @@ xfs_bmap_btalloc(
         * Normal allocation, done through xfs_alloc_vextent.
         */
        tryagain = isaligned = 0;
+       memset(&args, 0, sizeof(args));
        args.tp = ap->tp;
        args.mp = mp;
        args.fsbno = ap->blkno;
@@ -3082,6 +3083,7 @@ xfs_bmap_extents_to_btree(
         * Convert to a btree with two levels, one record in root.
         */
        XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
+       memset(&args, 0, sizeof(args));
        args.tp = tp;
        args.mp = mp;
        args.firstblock = *firstblock;
@@ -3237,6 +3239,7 @@ xfs_bmap_local_to_extents(
                xfs_buf_t       *bp;    /* buffer for extent block */
                xfs_bmbt_rec_host_t *ep;/* extent record pointer */
 
+               memset(&args, 0, sizeof(args));
                args.tp = tp;
                args.mp = ip->i_mount;
                args.firstblock = *firstblock;
@@ -4616,12 +4619,11 @@ xfs_bmapi_delay(
 
 
 STATIC int
-xfs_bmapi_allocate(
-       struct xfs_bmalloca     *bma,
-       int                     flags)
+__xfs_bmapi_allocate(
+       struct xfs_bmalloca     *bma)
 {
        struct xfs_mount        *mp = bma->ip->i_mount;
-       int                     whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
+       int                     whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ?
                                                XFS_ATTR_FORK : XFS_DATA_FORK;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
        int                     tmp_logflags = 0;
@@ -4654,24 +4656,27 @@ xfs_bmapi_allocate(
         * Indicate if this is the first user data in the file, or just any
         * user data.
         */
-       if (!(flags & XFS_BMAPI_METADATA)) {
+       if (!(bma->flags & XFS_BMAPI_METADATA)) {
                bma->userdata = (bma->offset == 0) ?
                        XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA;
        }
 
-       bma->minlen = (flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
+       bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
 
        /*
         * Only want to do the alignment at the eof if it is userdata and
         * allocation length is larger than a stripe unit.
         */
        if (mp->m_dalign && bma->length >= mp->m_dalign &&
-           !(flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
+           !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
                error = xfs_bmap_isaeof(bma, whichfork);
                if (error)
                        return error;
        }
 
+       if (bma->flags & XFS_BMAPI_STACK_SWITCH)
+               bma->stack_switch = 1;
+
        error = xfs_bmap_alloc(bma);
        if (error)
                return error;
@@ -4706,7 +4711,7 @@ xfs_bmapi_allocate(
         * A wasdelay extent has been initialized, so shouldn't be flagged
         * as unwritten.
         */
-       if (!bma->wasdel && (flags & XFS_BMAPI_PREALLOC) &&
+       if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) &&
            xfs_sb_version_hasextflgbit(&mp->m_sb))
                bma->got.br_state = XFS_EXT_UNWRITTEN;
 
@@ -4734,6 +4739,45 @@ xfs_bmapi_allocate(
        return 0;
 }
 
+static void
+xfs_bmapi_allocate_worker(
+       struct work_struct      *work)
+{
+       struct xfs_bmalloca     *args = container_of(work,
+                                               struct xfs_bmalloca, work);
+       unsigned long           pflags;
+
+       /* we are in a transaction context here */
+       current_set_flags_nested(&pflags, PF_FSTRANS);
+
+       args->result = __xfs_bmapi_allocate(args);
+       complete(args->done);
+
+       current_restore_flags_nested(&pflags, PF_FSTRANS);
+}
+
+/*
+ * Some allocation requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. Otherwise just
+ * call directly to avoid the context switch overhead here.
+ */
+int
+xfs_bmapi_allocate(
+       struct xfs_bmalloca     *args)
+{
+       DECLARE_COMPLETION_ONSTACK(done);
+
+       if (!args->stack_switch)
+               return __xfs_bmapi_allocate(args);
+
+
+       args->done = &done;
+       INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
+       queue_work(xfs_alloc_wq, &args->work);
+       wait_for_completion(&done);
+       return args->result;
+}
+
 STATIC int
 xfs_bmapi_convert_unwritten(
        struct xfs_bmalloca     *bma,
@@ -4919,6 +4963,7 @@ xfs_bmapi_write(
                        bma.conv = !!(flags & XFS_BMAPI_CONVERT);
                        bma.wasdel = wasdelay;
                        bma.offset = bno;
+                       bma.flags = flags;
 
                        /*
                         * There's a 32/64 bit type mismatch between the
@@ -4934,7 +4979,7 @@ xfs_bmapi_write(
 
                        ASSERT(len > 0);
                        ASSERT(bma.length > 0);
-                       error = xfs_bmapi_allocate(&bma, flags);
+                       error = xfs_bmapi_allocate(&bma);
                        if (error)
                                goto error0;
                        if (bma.blkno == NULLFSBLOCK)
index 803b56d..5f469c3 100644 (file)
@@ -77,6 +77,7 @@ typedef       struct xfs_bmap_free
  * from written to unwritten, otherwise convert from unwritten to written.
  */
 #define XFS_BMAPI_CONVERT      0x040
+#define XFS_BMAPI_STACK_SWITCH 0x080
 
 #define XFS_BMAPI_FLAGS \
        { XFS_BMAPI_ENTIRE,     "ENTIRE" }, \
@@ -85,7 +86,8 @@ typedef       struct xfs_bmap_free
        { XFS_BMAPI_PREALLOC,   "PREALLOC" }, \
        { XFS_BMAPI_IGSTATE,    "IGSTATE" }, \
        { XFS_BMAPI_CONTIG,     "CONTIG" }, \
-       { XFS_BMAPI_CONVERT,    "CONVERT" }
+       { XFS_BMAPI_CONVERT,    "CONVERT" }, \
+       { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
 
 
 static inline int xfs_bmapi_aflag(int w)
@@ -133,6 +135,11 @@ typedef struct xfs_bmalloca {
        char                    userdata;/* set if is user data */
        char                    aeof;   /* allocated space at eof */
        char                    conv;   /* overwriting unwritten extents */
+       char                    stack_switch;
+       int                     flags;
+       struct completion       *done;
+       struct work_struct      work;
+       int                     result;
 } xfs_bmalloca_t;
 
 /*
index 933b793..4b0b8dd 100644 (file)
@@ -1197,9 +1197,14 @@ xfs_buf_bio_end_io(
 {
        xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
 
-       xfs_buf_ioerror(bp, -error);
+       /*
+        * don't overwrite existing errors - otherwise we can lose errors on
+        * buffers that require multiple bios to complete.
+        */
+       if (!bp->b_error)
+               xfs_buf_ioerror(bp, -error);
 
-       if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+       if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
                invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
 
        _xfs_buf_ioend(bp, 1);
@@ -1279,6 +1284,11 @@ next_chunk:
                if (size)
                        goto next_chunk;
        } else {
+               /*
+                * This is guaranteed not to be the last io reference count
+                * because the caller (xfs_buf_iorequest) holds a count itself.
+                */
+               atomic_dec(&bp->b_io_remaining);
                xfs_buf_ioerror(bp, EIO);
                bio_put(bio);
        }
index a8d0ed9..becf4a9 100644 (file)
@@ -526,7 +526,25 @@ xfs_buf_item_unpin(
                }
                xfs_buf_relse(bp);
        } else if (freed && remove) {
+               /*
+                * There are currently two references to the buffer - the active
+                * LRU reference and the buf log item. What we are about to do
+                * here - simulate a failed IO completion - requires 3
+                * references.
+                *
+                * The LRU reference is removed by the xfs_buf_stale() call. The
+                * buf item reference is removed by the xfs_buf_iodone()
+                * callback that is run by xfs_buf_do_callbacks() during ioend
+                * processing (via the bp->b_iodone callback), and then finally
+                * the ioend processing will drop the IO reference if the buffer
+                * is marked XBF_ASYNC.
+                *
+                * Hence we need to take an additional reference here so that IO
+                * completion processing doesn't free the buffer prematurely.
+                */
                xfs_buf_lock(bp);
+               xfs_buf_hold(bp);
+               bp->b_flags |= XBF_ASYNC;
                xfs_buf_ioerror(bp, EIO);
                XFS_BUF_UNDONE(bp);
                xfs_buf_stale(bp);
index c25b094..4beaede 100644 (file)
@@ -399,9 +399,26 @@ xfs_growfs_data_private(
 
        /* update secondary superblocks. */
        for (agno = 1; agno < nagcount; agno++) {
-               error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
+               error = 0;
+               /*
+                * new secondary superblocks need to be zeroed, not read from
+                * disk as the contents of the new area we are growing into is
+                * completely unknown.
+                */
+               if (agno < oagcount) {
+                       error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
                                  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
                                  XFS_FSS_TO_BB(mp, 1), 0, &bp);
+               } else {
+                       bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp,
+                                 XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
+                                 XFS_FSS_TO_BB(mp, 1), 0);
+                       if (bp)
+                               xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
+                       else
+                               error = ENOMEM;
+               }
+
                if (error) {
                        xfs_warn(mp,
                "error %d reading secondary superblock for ag %d",
@@ -423,7 +440,7 @@ xfs_growfs_data_private(
                        break; /* no point in continuing */
                }
        }
-       return 0;
+       return error;
 
  error0:
        xfs_trans_cancel(tp, XFS_TRANS_ABORT);
index 445bf1a..c5c4ef4 100644 (file)
@@ -250,6 +250,7 @@ xfs_ialloc_ag_alloc(
                                        /* boundary */
        struct xfs_perag *pag;
 
+       memset(&args, 0, sizeof(args));
        args.tp = tp;
        args.mp = tp->t_mountp;
 
index 2778258..1938b41 100644 (file)
@@ -1509,7 +1509,8 @@ xfs_ifree_cluster(
                 * to mark all the active inodes on the buffer stale.
                 */
                bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
-                                       mp->m_bsize * blks_per_cluster, 0);
+                                       mp->m_bsize * blks_per_cluster,
+                                       XBF_UNMAPPED);
 
                if (!bp)
                        return ENOMEM;
index 8305f2a..c1df3c6 100644 (file)
@@ -70,7 +70,7 @@ xfs_find_handle(
        int                     hsize;
        xfs_handle_t            handle;
        struct inode            *inode;
-       struct fd               f;
+       struct fd               f = {0};
        struct path             path;
        int                     error;
        struct xfs_inode        *ip;
index 973dff6..7f53766 100644 (file)
@@ -584,7 +584,9 @@ xfs_iomap_write_allocate(
                         * pointer that the caller gave to us.
                         */
                        error = xfs_bmapi_write(tp, ip, map_start_fsb,
-                                               count_fsb, 0, &first_block, 1,
+                                               count_fsb,
+                                               XFS_BMAPI_STACK_SWITCH,
+                                               &first_block, 1,
                                                imap, &nimaps, &free_list);
                        if (error)
                                goto trans_cancel;
index 7f4f937..4dad756 100644 (file)
@@ -2387,14 +2387,27 @@ xlog_state_do_callback(
 
 
                                /*
-                                * update the last_sync_lsn before we drop the
+                                * Completion of a iclog IO does not imply that
+                                * a transaction has completed, as transactions
+                                * can be large enough to span many iclogs. We
+                                * cannot change the tail of the log half way
+                                * through a transaction as this may be the only
+                                * transaction in the log and moving th etail to
+                                * point to the middle of it will prevent
+                                * recovery from finding the start of the
+                                * transaction. Hence we should only update the
+                                * last_sync_lsn if this iclog contains
+                                * transaction completion callbacks on it.
+                                *
+                                * We have to do this before we drop the
                                 * icloglock to ensure we are the only one that
                                 * can update it.
                                 */
                                ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
                                        be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
-                               atomic64_set(&log->l_last_sync_lsn,
-                                       be64_to_cpu(iclog->ic_header.h_lsn));
+                               if (iclog->ic_callback)
+                                       atomic64_set(&log->l_last_sync_lsn,
+                                               be64_to_cpu(iclog->ic_header.h_lsn));
 
                        } else
                                ioerrors++;
index 5da3ace..d308749 100644 (file)
@@ -3541,7 +3541,7 @@ xlog_do_recovery_pass(
                                 *   - order is important.
                                 */
                                error = xlog_bread_offset(log, 0,
-                                               bblks - split_bblks, hbp,
+                                               bblks - split_bblks, dbp,
                                                offset + BBTOB(split_bblks));
                                if (error)
                                        goto bread_err2;
index 3538eda..00d78b5 100644 (file)
@@ -920,12 +920,6 @@ extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
 extern void drm_mode_connector_list_update(struct drm_connector *connector);
 extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
                                                struct edid *edid);
-extern int drm_connector_property_set_value(struct drm_connector *connector,
-                                        struct drm_property *property,
-                                        uint64_t value);
-extern int drm_connector_property_get_value(struct drm_connector *connector,
-                                        struct drm_property *property,
-                                        uint64_t *value);
 extern int drm_object_property_set_value(struct drm_mode_object *obj,
                                         struct drm_property *property,
                                         uint64_t val);
@@ -947,8 +941,6 @@ extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
 extern bool drm_crtc_in_use(struct drm_crtc *crtc);
 
-extern void drm_connector_attach_property(struct drm_connector *connector,
-                                         struct drm_property *property, uint64_t init_val);
 extern void drm_object_attach_property(struct drm_mode_object *obj,
                                       struct drm_property *property,
                                       uint64_t init_val);
index c09d367..e8e1417 100644 (file)
 #define MODE_I2C_READ  4
 #define MODE_I2C_STOP  8
 
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ *                              aux algorithm
+ * @running: set by the algo indicating whether an i2c transfer is ongoing or whether
+ *          the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
 struct i2c_algo_dp_aux_data {
        bool running;
        u16 address;
index 3650d5d..fce2ef3 100644 (file)
@@ -61,5 +61,19 @@ extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
 extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
 extern void drm_ht_remove(struct drm_open_hash *ht);
 
+/*
+ * RCU-safe interface
+ *
+ * The user of this API needs to make sure that two or more instances of the
+ * hash table manipulation functions are never run simultaneously.
+ * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
+ * with any of the manipulation functions as long as it's called from within
+ * an RCU read-locked section.
+ */
+#define drm_ht_insert_item_rcu drm_ht_insert_item
+#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
+#define drm_ht_remove_key_rcu drm_ht_remove_key
+#define drm_ht_remove_item_rcu drm_ht_remove_item
+#define drm_ht_find_item_rcu drm_ht_find_item
 
 #endif
index af1cbaf..c5c35e6 100644 (file)
        {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
index 3c13a3a..808dad2 100644 (file)
@@ -85,4 +85,30 @@ struct exynos_drm_hdmi_pdata {
        int (*get_hpd)(void);
 };
 
+/**
+ * Platform Specific Structure for DRM based IPP.
+ *
+ * @inv_pclk: if set 1. invert pixel clock
+ * @inv_vsync: if set 1. invert vsync signal for wb
+ * @inv_href: if set 1. invert href signal
+ * @inv_hsync: if set 1. invert hsync signal for wb
+ */
+struct exynos_drm_ipp_pol {
+       unsigned int inv_pclk;
+       unsigned int inv_vsync;
+       unsigned int inv_href;
+       unsigned int inv_hsync;
+};
+
+/**
+ * Platform Specific Structure for DRM based FIMC.
+ *
+ * @pol: current hardware block polarity settings.
+ * @clk_rate: current hardware clock rate.
+ */
+struct exynos_drm_fimc_pdata {
+       struct exynos_drm_ipp_pol pol;
+       int clk_rate;
+};
+
 #endif /* _EXYNOS_DRM_H_ */
index c6cae73..3cb5d84 100644 (file)
@@ -337,7 +337,6 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.
  * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Changes placement and caching policy of the buffer object
@@ -350,7 +349,7 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  */
 extern int ttm_bo_validate(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement,
-                               bool interruptible, bool no_wait_reserve,
+                               bool interruptible,
                                bool no_wait_gpu);
 
 /**
index dd96442..e3a43a4 100644 (file)
@@ -394,7 +394,7 @@ struct ttm_bo_driver {
         */
        int (*move) (struct ttm_buffer_object *bo,
                     bool evict, bool interruptible,
-                    bool no_wait_reserve, bool no_wait_gpu,
+                    bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem);
 
        /**
@@ -703,7 +703,6 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
  * @proposed_placement: Proposed new placement for the buffer object.
  * @mem: A struct ttm_mem_reg.
  * @interruptible: Sleep interruptible when sliping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Allocate memory space for the buffer object pointed to by @bo, using
@@ -719,7 +718,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement,
                                struct ttm_mem_reg *mem,
                                bool interruptible,
-                               bool no_wait_reserve, bool no_wait_gpu);
+                               bool no_wait_gpu);
 
 extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem);
@@ -901,7 +900,6 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -916,15 +914,14 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                          bool evict, bool no_wait_reserve,
-                          bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+                          bool evict, bool no_wait_gpu,
+                          struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -939,8 +936,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                             bool evict, bool no_wait_reserve,
-                             bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+                             bool evict, bool no_wait_gpu,
+                             struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_free_old_node
@@ -957,7 +954,6 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  * @bo: A pointer to a struct ttm_buffer_object.
  * @sync_obj: A sync object that signals when moving is complete.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -971,8 +967,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                                     void *sync_obj,
-                                    bool evict, bool no_wait_reserve,
-                                    bool no_wait_gpu,
+                                    bool evict, bool no_wait_gpu,
                                     struct ttm_mem_reg *new_mem);
 /**
  * ttm_io_prot
index c127315..f9f5e9e 100644 (file)
@@ -335,8 +335,8 @@ const char *__clk_get_name(struct clk *clk);
 struct clk_hw *__clk_get_hw(struct clk *clk);
 u8 __clk_get_num_parents(struct clk *clk);
 struct clk *__clk_get_parent(struct clk *clk);
-inline int __clk_get_enable_count(struct clk *clk);
-inline int __clk_get_prepare_count(struct clk *clk);
+int __clk_get_enable_count(struct clk *clk);
+int __clk_get_prepare_count(struct clk *clk);
 unsigned long __clk_get_rate(struct clk *clk);
 unsigned long __clk_get_flags(struct clk *clk);
 int __clk_is_enabled(struct clk *clk);
index f83f793..c8e1831 100644 (file)
@@ -17,6 +17,7 @@ enum dma_attr {
        DMA_ATTR_NON_CONSISTENT,
        DMA_ATTR_NO_KERNEL_MAPPING,
        DMA_ATTR_SKIP_CPU_SYNC,
+       DMA_ATTR_FORCE_CONTIGUOUS,
        DMA_ATTR_MAX,
 };
 
index df804ba..92a0dc7 100644 (file)
@@ -34,6 +34,7 @@ struct omap_i2c_bus_platform_data {
        u32             clkrate;
        u32             rev;
        u32             flags;
+       void            (*set_mpu_wkup_lat)(struct device *dev, long set);
 };
 
 #endif
index fa06804..bcaab4e 100644 (file)
@@ -1684,9 +1684,5 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
 static inline bool page_is_guard(struct page *page) { return false; }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
-extern void reset_zone_present_pages(void);
-extern void fixup_zone_present_pages(int nid, unsigned long start_pfn,
-                               unsigned long end_pfn);
-
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
index 7c6a113..9653166 100644 (file)
@@ -137,7 +137,7 @@ struct dw_mci {
 
        dma_addr_t              sg_dma;
        void                    *sg_cpu;
-       struct dw_mci_dma_ops   *dma_ops;
+       const struct dw_mci_dma_ops     *dma_ops;
 #ifdef CONFIG_MMC_DW_IDMAC
        unsigned int            ring_size;
 #else
@@ -162,7 +162,7 @@ struct dw_mci {
        u16                     data_offset;
        struct device           *dev;
        struct dw_mci_board     *pdata;
-       struct dw_mci_drv_data  *drv_data;
+       const struct dw_mci_drv_data    *drv_data;
        void                    *priv;
        struct clk              *biu_clk;
        struct clk              *ciu_clk;
@@ -186,7 +186,7 @@ struct dw_mci {
 
        struct regulator        *vmmc;  /* Power regulator */
        unsigned long           irq_flags; /* IRQ flags */
-       unsigned int            irq;
+       int                     irq;
 };
 
 /* DMA ops for Internal/External DMAC interface */
index fa8529a..1edcb4d 100644 (file)
@@ -91,6 +91,7 @@ struct sdhci_host {
        unsigned int quirks2;   /* More deviations from spec. */
 
 #define SDHCI_QUIRK2_HOST_OFF_CARD_ON                  (1<<0)
+#define SDHCI_QUIRK2_HOST_NO_CMD23                     (1<<1)
 
        int irq;                /* Device IRQ */
        void __iomem *ioaddr;   /* Mapped address */
index 50aaca8..a23923b 100644 (file)
@@ -752,7 +752,7 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
                                     unsigned long size,
                                     enum memmap_context context);
 
-extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
+extern void lruvec_init(struct lruvec *lruvec);
 
 static inline struct zone *lruvec_zone(struct lruvec *lruvec)
 {
index a1984dd..0506eb5 100644 (file)
@@ -28,11 +28,13 @@ static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
 #endif
 
 #else /* CONFIG_OF_ADDRESS */
+#ifndef of_address_to_resource
 static inline int of_address_to_resource(struct device_node *dev, int index,
                                         struct resource *r)
 {
        return -EINVAL;
 }
+#endif
 static inline struct device_node *of_find_matching_node_by_address(
                                        struct device_node *from,
                                        const struct of_device_id *matches,
@@ -40,10 +42,12 @@ static inline struct device_node *of_find_matching_node_by_address(
 {
        return NULL;
 }
+#ifndef of_iomap
 static inline void __iomem *of_iomap(struct device_node *device, int index)
 {
        return NULL;
 }
+#endif
 static inline const __be32 *of_get_address(struct device_node *dev, int index,
                                        u64 *size, unsigned int *flags)
 {
diff --git a/include/linux/platform_data/omap_ocp2scp.h b/include/linux/platform_data/omap_ocp2scp.h
new file mode 100644 (file)
index 0000000..5c6c393
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * omap_ocp2scp.h -- ocp2scp header file
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_OMAP_OCP2SCP_H
+#define __DRIVERS_OMAP_OCP2SCP_H
+
+struct omap_ocp2scp_dev {
+       const char                      *drv_name;
+       struct resource                 *res;
+};
+
+struct omap_ocp2scp_platform_data {
+       int                             dev_cnt;
+       struct omap_ocp2scp_dev         **devices;
+};
+#endif /* __DRIVERS_OMAP_OCP2SCP_H */
index f2dc6d8..38a9935 100644 (file)
@@ -54,7 +54,8 @@ struct ptp_clock_request {
  * clock operations
  *
  * @adjfreq:  Adjusts the frequency of the hardware clock.
- *            parameter delta: Desired period change in parts per billion.
+ *            parameter delta: Desired frequency offset from nominal frequency
+ *            in parts per billion
  *
  * @adjtime:  Shifts the time of the hardware clock.
  *            parameter delta: Desired change in nanoseconds.
index 4187da5..a3e7842 100644 (file)
@@ -275,9 +275,11 @@ struct rio_id_table {
  * struct rio_net - RIO network info
  * @node: Node in global list of RIO networks
  * @devices: List of devices in this network
+ * @switches: List of switches in this network
  * @mports: List of master ports accessing this network
  * @hport: Default port for accessing this network
  * @id: RIO network ID
+ * @destid_table: destID allocation table
  */
 struct rio_net {
        struct list_head node;  /* node in list of networks */
index c64de9d..2f694f3 100644 (file)
@@ -46,8 +46,9 @@ struct ads7846_platform_data {
        u16     debounce_rep;           /* additional consecutive good readings
                                         * required after the first two */
        int     gpio_pendown;           /* the GPIO used to decide the pendown
-                                        * state if get_pendown_state == NULL
-                                        */
+                                        * state if get_pendown_state == NULL */
+       int     gpio_pendown_debounce;  /* platform specific debounce time for
+                                        * the gpio_pendown */
        int     (*get_pendown_state)(void);
        int     (*filter_init)  (const struct ads7846_platform_data *pdata,
                                 void **filter_data);
index 6f0ba01..63445ed 100644 (file)
@@ -1351,7 +1351,7 @@ struct xfrm6_tunnel {
 };
 
 extern void xfrm_init(void);
-extern void xfrm4_init(int rt_hash_size);
+extern void xfrm4_init(void);
 extern int xfrm_state_init(struct net *net);
 extern void xfrm_state_fini(struct net *net);
 extern void xfrm4_state_init(void);
index 88fae8d..55367b0 100644 (file)
@@ -135,6 +135,8 @@ struct scsi_device {
                                     * because we did a bus reset. */
        unsigned use_10_for_rw:1; /* first try 10-byte read / write */
        unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
+       unsigned no_report_opcodes:1;   /* no REPORT SUPPORTED OPERATION CODES */
+       unsigned no_write_same:1;       /* no WRITE SAME command */
        unsigned skip_ms_page_8:1;      /* do not use MODE SENSE page 0x08 */
        unsigned skip_ms_page_3f:1;     /* do not use MODE SENSE page 0x3f */
        unsigned skip_vpd_pages:1;      /* do not read VPD pages */
@@ -362,6 +364,8 @@ extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
                                int retries, struct scsi_sense_hdr *sshdr);
 extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf,
                             int buf_len);
+extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
+                             unsigned int len, unsigned char opcode);
 extern int scsi_device_set_state(struct scsi_device *sdev,
                                 enum scsi_device_state state);
 extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
index c0494d5..e7f52c3 100644 (file)
@@ -133,17 +133,26 @@ struct drm_exynos_g2d_cmd {
        __u32   data;
 };
 
+enum drm_exynos_g2d_buf_type {
+       G2D_BUF_USERPTR = 1 << 31,
+};
+
 enum drm_exynos_g2d_event_type {
        G2D_EVENT_NOT,
        G2D_EVENT_NONSTOP,
        G2D_EVENT_STOP,         /* not yet */
 };
 
+struct drm_exynos_g2d_userptr {
+       unsigned long userptr;
+       unsigned long size;
+};
+
 struct drm_exynos_g2d_set_cmdlist {
        __u64                                   cmd;
-       __u64                                   cmd_gem;
+       __u64                                   cmd_buf;
        __u32                                   cmd_nr;
-       __u32                                   cmd_gem_nr;
+       __u32                                   cmd_buf_nr;
 
        /* for g2d event */
        __u64                                   event_type;
@@ -154,6 +163,170 @@ struct drm_exynos_g2d_exec {
        __u64                                   async;
 };
 
+enum drm_exynos_ops_id {
+       EXYNOS_DRM_OPS_SRC,
+       EXYNOS_DRM_OPS_DST,
+       EXYNOS_DRM_OPS_MAX,
+};
+
+struct drm_exynos_sz {
+       __u32   hsize;
+       __u32   vsize;
+};
+
+struct drm_exynos_pos {
+       __u32   x;
+       __u32   y;
+       __u32   w;
+       __u32   h;
+};
+
+enum drm_exynos_flip {
+       EXYNOS_DRM_FLIP_NONE = (0 << 0),
+       EXYNOS_DRM_FLIP_VERTICAL = (1 << 0),
+       EXYNOS_DRM_FLIP_HORIZONTAL = (1 << 1),
+};
+
+enum drm_exynos_degree {
+       EXYNOS_DRM_DEGREE_0,
+       EXYNOS_DRM_DEGREE_90,
+       EXYNOS_DRM_DEGREE_180,
+       EXYNOS_DRM_DEGREE_270,
+};
+
+enum drm_exynos_planer {
+       EXYNOS_DRM_PLANAR_Y,
+       EXYNOS_DRM_PLANAR_CB,
+       EXYNOS_DRM_PLANAR_CR,
+       EXYNOS_DRM_PLANAR_MAX,
+};
+
+/**
+ * A structure for ipp supported property list.
+ *
+ * @version: version of this structure.
+ * @ipp_id: id of ipp driver.
+ * @count: count of ipp driver.
+ * @writeback: flag of writeback supporting.
+ * @flip: flag of flip supporting.
+ * @degree: flag of degree information.
+ * @csc: flag of csc supporting.
+ * @crop: flag of crop supporting.
+ * @scale: flag of scale supporting.
+ * @refresh_min: min hz of refresh.
+ * @refresh_max: max hz of refresh.
+ * @crop_min: crop min resolution.
+ * @crop_max: crop max resolution.
+ * @scale_min: scale min resolution.
+ * @scale_max: scale max resolution.
+ */
+struct drm_exynos_ipp_prop_list {
+       __u32   version;
+       __u32   ipp_id;
+       __u32   count;
+       __u32   writeback;
+       __u32   flip;
+       __u32   degree;
+       __u32   csc;
+       __u32   crop;
+       __u32   scale;
+       __u32   refresh_min;
+       __u32   refresh_max;
+       __u32   reserved;
+       struct drm_exynos_sz    crop_min;
+       struct drm_exynos_sz    crop_max;
+       struct drm_exynos_sz    scale_min;
+       struct drm_exynos_sz    scale_max;
+};
+
+/**
+ * A structure for ipp config.
+ *
+ * @ops_id: property of operation directions.
+ * @flip: property of mirror, flip.
+ * @degree: property of rotation degree.
+ * @fmt: property of image format.
+ * @sz: property of image size.
+ * @pos: property of image position(src-cropped,dst-scaler).
+ */
+struct drm_exynos_ipp_config {
+       enum drm_exynos_ops_id ops_id;
+       enum drm_exynos_flip    flip;
+       enum drm_exynos_degree  degree;
+       __u32   fmt;
+       struct drm_exynos_sz    sz;
+       struct drm_exynos_pos   pos;
+};
+
+enum drm_exynos_ipp_cmd {
+       IPP_CMD_NONE,
+       IPP_CMD_M2M,
+       IPP_CMD_WB,
+       IPP_CMD_OUTPUT,
+       IPP_CMD_MAX,
+};
+
+/**
+ * A structure for ipp property.
+ *
+ * @config: source, destination config.
+ * @cmd: definition of command.
+ * @ipp_id: id of ipp driver.
+ * @prop_id: id of property.
+ * @refresh_rate: refresh rate.
+ */
+struct drm_exynos_ipp_property {
+       struct drm_exynos_ipp_config config[EXYNOS_DRM_OPS_MAX];
+       enum drm_exynos_ipp_cmd cmd;
+       __u32   ipp_id;
+       __u32   prop_id;
+       __u32   refresh_rate;
+};
+
+enum drm_exynos_ipp_buf_type {
+       IPP_BUF_ENQUEUE,
+       IPP_BUF_DEQUEUE,
+};
+
+/**
+ * A structure for ipp buffer operations.
+ *
+ * @ops_id: operation directions.
+ * @buf_type: definition of buffer.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @handle: Y, Cb, Cr each planar handle.
+ * @user_data: user data.
+ */
+struct drm_exynos_ipp_queue_buf {
+       enum drm_exynos_ops_id  ops_id;
+       enum drm_exynos_ipp_buf_type    buf_type;
+       __u32   prop_id;
+       __u32   buf_id;
+       __u32   handle[EXYNOS_DRM_PLANAR_MAX];
+       __u32   reserved;
+       __u64   user_data;
+};
+
+enum drm_exynos_ipp_ctrl {
+       IPP_CTRL_PLAY,
+       IPP_CTRL_STOP,
+       IPP_CTRL_PAUSE,
+       IPP_CTRL_RESUME,
+       IPP_CTRL_MAX,
+};
+
+/**
+ * A structure for ipp start/stop operations.
+ *
+ * @prop_id: id of property.
+ * @ctrl: definition of control.
+ */
+struct drm_exynos_ipp_cmd_ctrl {
+       __u32   prop_id;
+       enum drm_exynos_ipp_ctrl        ctrl;
+};
+
 #define DRM_EXYNOS_GEM_CREATE          0x00
 #define DRM_EXYNOS_GEM_MAP_OFFSET      0x01
 #define DRM_EXYNOS_GEM_MMAP            0x02
@@ -166,6 +339,12 @@ struct drm_exynos_g2d_exec {
 #define DRM_EXYNOS_G2D_SET_CMDLIST     0x21
 #define DRM_EXYNOS_G2D_EXEC            0x22
 
+/* IPP - Image Post Processing */
+#define DRM_EXYNOS_IPP_GET_PROPERTY    0x30
+#define DRM_EXYNOS_IPP_SET_PROPERTY    0x31
+#define DRM_EXYNOS_IPP_QUEUE_BUF       0x32
+#define DRM_EXYNOS_IPP_CMD_CTRL        0x33
+
 #define DRM_IOCTL_EXYNOS_GEM_CREATE            DRM_IOWR(DRM_COMMAND_BASE + \
                DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
 
@@ -188,8 +367,18 @@ struct drm_exynos_g2d_exec {
 #define DRM_IOCTL_EXYNOS_G2D_EXEC              DRM_IOWR(DRM_COMMAND_BASE + \
                DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
 
+#define DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY      DRM_IOWR(DRM_COMMAND_BASE + \
+               DRM_EXYNOS_IPP_GET_PROPERTY, struct drm_exynos_ipp_prop_list)
+#define DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY      DRM_IOWR(DRM_COMMAND_BASE + \
+               DRM_EXYNOS_IPP_SET_PROPERTY, struct drm_exynos_ipp_property)
+#define DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF DRM_IOWR(DRM_COMMAND_BASE + \
+               DRM_EXYNOS_IPP_QUEUE_BUF, struct drm_exynos_ipp_queue_buf)
+#define DRM_IOCTL_EXYNOS_IPP_CMD_CTRL          DRM_IOWR(DRM_COMMAND_BASE + \
+               DRM_EXYNOS_IPP_CMD_CTRL, struct drm_exynos_ipp_cmd_ctrl)
+
 /* EXYNOS specific events */
 #define DRM_EXYNOS_G2D_EVENT           0x80000000
+#define DRM_EXYNOS_IPP_EVENT           0x80000001
 
 struct drm_exynos_g2d_event {
        struct drm_event        base;
@@ -200,4 +389,14 @@ struct drm_exynos_g2d_event {
        __u32                   reserved;
 };
 
+struct drm_exynos_ipp_event {
+       struct drm_event        base;
+       __u64                   user_data;
+       __u32                   tv_sec;
+       __u32                   tv_usec;
+       __u32                   prop_id;
+       __u32                   reserved;
+       __u32                   buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
 #endif /* _UAPI_EXYNOS_DRM_H_ */
index 4766c0f..eeda917 100644 (file)
@@ -913,9 +913,11 @@ struct drm_radeon_gem_va {
 /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
 #define RADEON_CS_KEEP_TILING_FLAGS 0x01
 #define RADEON_CS_USE_VM            0x02
+#define RADEON_CS_END_OF_FRAME      0x04 /* a hint from userspace which CS is the last one */
 /* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
 #define RADEON_CS_RING_GFX          0
 #define RADEON_CS_RING_COMPUTE      1
+#define RADEON_CS_RING_DMA          2
 /* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
 /* 0 = normal, + = higher priority, - = lower priority */
 
@@ -966,6 +968,10 @@ struct drm_radeon_cs {
 #define RADEON_INFO_MAX_PIPES          0x10
 /* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
 #define RADEON_INFO_TIMESTAMP          0x11
+/* max shader engines (SE) - needed for geometry shaders, etc. */
+#define RADEON_INFO_MAX_SE             0x12
+/* max SH per SE */
+#define RADEON_INFO_MAX_SH_PER_SE      0x13
 
 struct drm_radeon_info {
        uint32_t                request;
index 8c99ce7..2c267bc 100644 (file)
@@ -25,7 +25,6 @@
 #define EPOLL_CTL_ADD 1
 #define EPOLL_CTL_DEL 2
 #define EPOLL_CTL_MOD 3
-#define EPOLL_CTL_DISABLE 4
 
 /*
  * Request the handling of system wakeup events so as to prevent system suspends
index a49c4af..b29272d 100644 (file)
@@ -8,4 +8,13 @@
 #define OOM_SCORE_ADJ_MIN      (-1000)
 #define OOM_SCORE_ADJ_MAX      1000
 
+/*
+ * /proc/<pid>/oom_adj set to -17 protects from the oom killer for legacy
+ * purposes.
+ */
+#define OOM_DISABLE (-17)
+/* inclusive */
+#define OOM_ADJUST_MIN (-16)
+#define OOM_ADJUST_MAX 15
+
 #endif /* _UAPI__INCLUDE_LINUX_OOM_H */
index b193fa2..13e43e4 100644 (file)
@@ -5,6 +5,36 @@
 #include <xen/interface/hvm/params.h>
 #include <asm/xen/hypercall.h>
 
+static const char *param_name(int op)
+{
+#define PARAM(x) [HVM_PARAM_##x] = #x
+       static const char *const names[] = {
+               PARAM(CALLBACK_IRQ),
+               PARAM(STORE_PFN),
+               PARAM(STORE_EVTCHN),
+               PARAM(PAE_ENABLED),
+               PARAM(IOREQ_PFN),
+               PARAM(BUFIOREQ_PFN),
+               PARAM(TIMER_MODE),
+               PARAM(HPET_ENABLED),
+               PARAM(IDENT_PT),
+               PARAM(DM_DOMAIN),
+               PARAM(ACPI_S_STATE),
+               PARAM(VM86_TSS),
+               PARAM(VPT_ALIGN),
+               PARAM(CONSOLE_PFN),
+               PARAM(CONSOLE_EVTCHN),
+       };
+#undef PARAM
+
+       if (op >= ARRAY_SIZE(names))
+               return "unknown";
+
+       if (!names[op])
+               return "reserved";
+
+       return names[op];
+}
 static inline int hvm_get_parameter(int idx, uint64_t *value)
 {
        struct xen_hvm_param xhv;
@@ -14,8 +44,8 @@ static inline int hvm_get_parameter(int idx, uint64_t *value)
        xhv.index = idx;
        r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
        if (r < 0) {
-               printk(KERN_ERR "Cannot get hvm parameter %d: %d!\n",
-                       idx, r);
+               printk(KERN_ERR "Cannot get hvm parameter %s (%d): %d!\n",
+                       param_name(idx), idx, r);
                return r;
        }
        *value = xhv.value;
index 3717e7b..20ef219 100644 (file)
@@ -716,7 +716,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
                                struct futex_pi_state **ps,
                                struct task_struct *task, int set_waiters)
 {
-       int lock_taken, ret, ownerdied = 0;
+       int lock_taken, ret, force_take = 0;
        u32 uval, newval, curval, vpid = task_pid_vnr(task);
 
 retry:
@@ -755,17 +755,15 @@ retry:
        newval = curval | FUTEX_WAITERS;
 
        /*
-        * There are two cases, where a futex might have no owner (the
-        * owner TID is 0): OWNER_DIED. We take over the futex in this
-        * case. We also do an unconditional take over, when the owner
-        * of the futex died.
-        *
-        * This is safe as we are protected by the hash bucket lock !
+        * Should we force take the futex? See below.
         */
-       if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
-               /* Keep the OWNER_DIED bit */
+       if (unlikely(force_take)) {
+               /*
+                * Keep the OWNER_DIED and the WAITERS bit and set the
+                * new TID value.
+                */
                newval = (curval & ~FUTEX_TID_MASK) | vpid;
-               ownerdied = 0;
+               force_take = 0;
                lock_taken = 1;
        }
 
@@ -775,7 +773,7 @@ retry:
                goto retry;
 
        /*
-        * We took the lock due to owner died take over.
+        * We took the lock due to forced take over.
         */
        if (unlikely(lock_taken))
                return 1;
@@ -790,20 +788,25 @@ retry:
                switch (ret) {
                case -ESRCH:
                        /*
-                        * No owner found for this futex. Check if the
-                        * OWNER_DIED bit is set to figure out whether
-                        * this is a robust futex or not.
+                        * We failed to find an owner for this
+                        * futex. So we have no pi_state to block
+                        * on. This can happen in two cases:
+                        *
+                        * 1) The owner died
+                        * 2) A stale FUTEX_WAITERS bit
+                        *
+                        * Re-read the futex value.
                         */
                        if (get_futex_value_locked(&curval, uaddr))
                                return -EFAULT;
 
                        /*
-                        * We simply start over in case of a robust
-                        * futex. The code above will take the futex
-                        * and return happy.
+                        * If the owner died or we have a stale
+                        * WAITERS bit the owner TID in the user space
+                        * futex is 0.
                         */
-                       if (curval & FUTEX_OWNER_DIED) {
-                               ownerdied = 1;
+                       if (!(curval & FUTEX_TID_MASK)) {
+                               force_take = 1;
                                goto retry;
                        }
                default:
index 6085f5e..6e48c3a 100644 (file)
@@ -2293,12 +2293,17 @@ static void layout_symtab(struct module *mod, struct load_info *info)
        src = (void *)info->hdr + symsect->sh_offset;
        nsrc = symsect->sh_size / sizeof(*src);
 
+       /* strtab always starts with a nul, so offset 0 is the empty string. */
+       strtab_size = 1;
+
        /* Compute total space required for the core symbols' strtab. */
-       for (ndst = i = strtab_size = 1; i < nsrc; ++i, ++src)
-               if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) {
-                       strtab_size += strlen(&info->strtab[src->st_name]) + 1;
+       for (ndst = i = 0; i < nsrc; i++) {
+               if (i == 0 ||
+                   is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
+                       strtab_size += strlen(&info->strtab[src[i].st_name])+1;
                        ndst++;
                }
+       }
 
        /* Append room for core symbols at end of core part. */
        info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
@@ -2332,15 +2337,15 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
        mod->core_symtab = dst = mod->module_core + info->symoffs;
        mod->core_strtab = s = mod->module_core + info->stroffs;
        src = mod->symtab;
-       *dst = *src;
        *s++ = 0;
-       for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
-               if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum))
-                       continue;
-
-               dst[ndst] = *src;
-               dst[ndst++].st_name = s - mod->core_strtab;
-               s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
+       for (ndst = i = 0; i < mod->num_symtab; i++) {
+               if (i == 0 ||
+                   is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
+                       dst[ndst] = src[i];
+                       dst[ndst++].st_name = s - mod->core_strtab;
+                       s += strlcpy(s, &mod->strtab[src[i].st_name],
+                                    KSYM_NAME_LEN) + 1;
+               }
        }
        mod->core_num_syms = ndst;
 }
index 678ce4f..095ab15 100644 (file)
@@ -641,7 +641,14 @@ do { \
        **************  MIPS  *****************
        ***************************************/
 #if defined(__mips__) && W_TYPE_SIZE == 32
-#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
+#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#define umul_ppmm(w1, w0, u, v)                        \
+do {                                           \
+       UDItype __ll = (UDItype)(u) * (v);      \
+       w1 = __ll >> 32;                        \
+       w0 = __ll;                              \
+} while (0)
+#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
 #define umul_ppmm(w1, w0, u, v) \
        __asm__ ("multu %2,%3" \
        : "=l" ((USItype)(w0)), \
@@ -666,7 +673,15 @@ do { \
        **************  MIPS/64  **************
        ***************************************/
 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
+#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#define umul_ppmm(w1, w0, u, v) \
+do {                                                                   \
+       typedef unsigned int __ll_UTItype __attribute__((mode(TI)));    \
+       __ll_UTItype __ll = (__ll_UTItype)(u) * (v);                    \
+       w1 = __ll >> 64;                                                \
+       w0 = __ll;                                                      \
+} while (0)
+#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
 #define umul_ppmm(w1, w0, u, v) \
        __asm__ ("dmultu %2,%3" \
        : "=l" ((UDItype)(w0)), \
index 434be4a..f468185 100644 (file)
@@ -198,8 +198,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                        int order = ilog2(BITS_PER_LONG);
 
                        __free_pages_bootmem(pfn_to_page(start), order);
-                       fixup_zone_present_pages(page_to_nid(pfn_to_page(start)),
-                                       start, start + BITS_PER_LONG);
                        count += BITS_PER_LONG;
                        start += BITS_PER_LONG;
                } else {
@@ -210,9 +208,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                                if (vec & 1) {
                                        page = pfn_to_page(start + off);
                                        __free_pages_bootmem(page, 0);
-                                       fixup_zone_present_pages(
-                                               page_to_nid(page),
-                                               start + off, start + off + 1);
                                        count++;
                                }
                                vec >>= 1;
@@ -226,11 +221,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
        pages = bdata->node_low_pfn - bdata->node_min_pfn;
        pages = bootmem_bootmap_pages(pages);
        count += pages;
-       while (pages--) {
-               fixup_zone_present_pages(page_to_nid(page),
-                               page_to_pfn(page), page_to_pfn(page) + 1);
+       while (pages--)
                __free_pages_bootmem(page++, 0);
-       }
 
        bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
 
index d517cd1..2da13a5 100644 (file)
@@ -98,7 +98,7 @@ struct page *kmap_to_page(void *vaddr)
 {
        unsigned long addr = (unsigned long)vaddr;
 
-       if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) {
+       if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
                int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
                return pte_page(pkmap_page_table[i]);
        }
index 7acf43b..dd39ba0 100644 (file)
@@ -1055,12 +1055,24 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
                                      struct mem_cgroup *memcg)
 {
        struct mem_cgroup_per_zone *mz;
+       struct lruvec *lruvec;
 
-       if (mem_cgroup_disabled())
-               return &zone->lruvec;
+       if (mem_cgroup_disabled()) {
+               lruvec = &zone->lruvec;
+               goto out;
+       }
 
        mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
-       return &mz->lruvec;
+       lruvec = &mz->lruvec;
+out:
+       /*
+        * Since a node can be onlined after the mem_cgroup was created,
+        * we have to be prepared to initialize lruvec->zone here;
+        * and if offlined then reonlined, we need to reinitialize it.
+        */
+       if (unlikely(lruvec->zone != zone))
+               lruvec->zone = zone;
+       return lruvec;
 }
 
 /*
@@ -1087,9 +1099,12 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup *memcg;
        struct page_cgroup *pc;
+       struct lruvec *lruvec;
 
-       if (mem_cgroup_disabled())
-               return &zone->lruvec;
+       if (mem_cgroup_disabled()) {
+               lruvec = &zone->lruvec;
+               goto out;
+       }
 
        pc = lookup_page_cgroup(page);
        memcg = pc->mem_cgroup;
@@ -1107,7 +1122,16 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
                pc->mem_cgroup = memcg = root_mem_cgroup;
 
        mz = page_cgroup_zoneinfo(memcg, page);
-       return &mz->lruvec;
+       lruvec = &mz->lruvec;
+out:
+       /*
+        * Since a node can be onlined after the mem_cgroup was created,
+        * we have to be prepared to initialize lruvec->zone here;
+        * and if offlined then reonlined, we need to reinitialize it.
+        */
+       if (unlikely(lruvec->zone != zone))
+               lruvec->zone = zone;
+       return lruvec;
 }
 
 /**
@@ -1452,17 +1476,26 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg)
 static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
 {
        u64 limit;
-       u64 memsw;
 
        limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
-       limit += total_swap_pages << PAGE_SHIFT;
 
-       memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
        /*
-        * If memsw is finite and limits the amount of swap space available
-        * to this memcg, return that limit.
+        * Do not consider swap space if we cannot swap due to swappiness
         */
-       return min(limit, memsw);
+       if (mem_cgroup_swappiness(memcg)) {
+               u64 memsw;
+
+               limit += total_swap_pages << PAGE_SHIFT;
+               memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+
+               /*
+                * If memsw is finite and limits the amount of swap space
+                * available to this memcg, return that limit.
+                */
+               limit = min(limit, memsw);
+       }
+
+       return limit;
 }
 
 void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
@@ -3688,17 +3721,17 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
                                int node, int zid, enum lru_list lru)
 {
-       struct mem_cgroup_per_zone *mz;
+       struct lruvec *lruvec;
        unsigned long flags, loop;
        struct list_head *list;
        struct page *busy;
        struct zone *zone;
 
        zone = &NODE_DATA(node)->node_zones[zid];
-       mz = mem_cgroup_zoneinfo(memcg, node, zid);
-       list = &mz->lruvec.lists[lru];
+       lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+       list = &lruvec->lists[lru];
 
-       loop = mz->lru_size[lru];
+       loop = mem_cgroup_get_lru_size(lruvec, lru);
        /* give some margin against EBUSY etc...*/
        loop += 256;
        busy = NULL;
@@ -4736,7 +4769,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 
        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                mz = &pn->zoneinfo[zone];
-               lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
+               lruvec_init(&mz->lruvec);
                mz->usage_in_excess = 0;
                mz->on_tree = false;
                mz->memcg = memcg;
index fb135ba..221fc9f 100644 (file)
@@ -2527,9 +2527,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        int ret = 0;
        int page_mkwrite = 0;
        struct page *dirty_page = NULL;
-       unsigned long mmun_start;       /* For mmu_notifiers */
-       unsigned long mmun_end;         /* For mmu_notifiers */
-       bool mmun_called = false;       /* For mmu_notifiers */
+       unsigned long mmun_start = 0;   /* For mmu_notifiers */
+       unsigned long mmun_end = 0;     /* For mmu_notifiers */
 
        old_page = vm_normal_page(vma, address, orig_pte);
        if (!old_page) {
@@ -2708,8 +2707,7 @@ gotten:
                goto oom_free_new;
 
        mmun_start  = address & PAGE_MASK;
-       mmun_end    = (address & PAGE_MASK) + PAGE_SIZE;
-       mmun_called = true;
+       mmun_end    = mmun_start + PAGE_SIZE;
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
        /*
@@ -2778,7 +2776,7 @@ gotten:
                page_cache_release(new_page);
 unlock:
        pte_unmap_unlock(page_table, ptl);
-       if (mmun_called)
+       if (mmun_end > mmun_start)
                mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        if (old_page) {
                /*
index 56b758a..e4eeaca 100644 (file)
@@ -106,7 +106,6 @@ static void get_page_bootmem(unsigned long info,  struct page *page,
 void __ref put_page_bootmem(struct page *page)
 {
        unsigned long type;
-       struct zone *zone;
 
        type = (unsigned long) page->lru.next;
        BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
@@ -117,12 +116,6 @@ void __ref put_page_bootmem(struct page *page)
                set_page_private(page, 0);
                INIT_LIST_HEAD(&page->lru);
                __free_pages_bootmem(page, 0);
-
-               zone = page_zone(page);
-               zone_span_writelock(zone);
-               zone->present_pages++;
-               zone_span_writeunlock(zone);
-               totalram_pages++;
        }
 
 }
index 2d94235..9a796c4 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -334,8 +334,10 @@ void validate_mm(struct mm_struct *mm)
        struct vm_area_struct *vma = mm->mmap;
        while (vma) {
                struct anon_vma_chain *avc;
+               vma_lock_anon_vma(vma);
                list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                        anon_vma_interval_tree_verify(avc);
+               vma_unlock_anon_vma(vma);
                vma = vma->vm_next;
                i++;
        }
index 3cef80f..4596d81 100644 (file)
@@ -87,7 +87,7 @@ int memmap_valid_within(unsigned long pfn,
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
 
-void lruvec_init(struct lruvec *lruvec, struct zone *zone)
+void lruvec_init(struct lruvec *lruvec)
 {
        enum lru_list lru;
 
@@ -95,8 +95,4 @@ void lruvec_init(struct lruvec *lruvec, struct zone *zone)
 
        for_each_lru(lru)
                INIT_LIST_HEAD(&lruvec->lists[lru]);
-
-#ifdef CONFIG_MEMCG
-       lruvec->zone = zone;
-#endif
 }
index 714d5d6..bd82f6b 100644 (file)
@@ -116,8 +116,6 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
                return 0;
 
        __free_pages_memory(start_pfn, end_pfn);
-       fixup_zone_present_pages(pfn_to_nid(start >> PAGE_SHIFT),
-                       start_pfn, end_pfn);
 
        return end_pfn - start_pfn;
 }
@@ -128,7 +126,6 @@ unsigned long __init free_low_memory_core_early(int nodeid)
        phys_addr_t start, end, size;
        u64 i;
 
-       reset_zone_present_pages();
        for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
                count += __free_memory_core(start, end);
 
index 5b74de6..bcb72c6 100644 (file)
@@ -1405,7 +1405,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 
        mt = get_pageblock_migratetype(page);
        if (unlikely(mt != MIGRATE_ISOLATE))
-               __mod_zone_freepage_state(zone, -(1UL << order), mt);
+               __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
 
        if (alloc_order != order)
                expand(zone, page, alloc_order, order,
@@ -4505,7 +4505,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone->zone_pgdat = pgdat;
 
                zone_pcp_init(zone);
-               lruvec_init(&zone->lruvec, zone);
+               lruvec_init(&zone->lruvec);
                if (!size)
                        continue;
 
@@ -6098,37 +6098,3 @@ void dump_page(struct page *page)
        dump_page_flags(page->flags);
        mem_cgroup_print_bad_page(page);
 }
-
-/* reset zone->present_pages */
-void reset_zone_present_pages(void)
-{
-       struct zone *z;
-       int i, nid;
-
-       for_each_node_state(nid, N_HIGH_MEMORY) {
-               for (i = 0; i < MAX_NR_ZONES; i++) {
-                       z = NODE_DATA(nid)->node_zones + i;
-                       z->present_pages = 0;
-               }
-       }
-}
-
-/* calculate zone's present pages in buddy system */
-void fixup_zone_present_pages(int nid, unsigned long start_pfn,
-                               unsigned long end_pfn)
-{
-       struct zone *z;
-       unsigned long zone_start_pfn, zone_end_pfn;
-       int i;
-
-       for (i = 0; i < MAX_NR_ZONES; i++) {
-               z = NODE_DATA(nid)->node_zones + i;
-               zone_start_pfn = z->zone_start_pfn;
-               zone_end_pfn = zone_start_pfn + z->spanned_pages;
-
-               /* if the two regions intersect */
-               if (!(zone_start_pfn >= end_pfn || zone_end_pfn <= start_pfn))
-                       z->present_pages += min(end_pfn, zone_end_pfn) -
-                                           max(start_pfn, zone_start_pfn);
-       }
-}
index 67afba5..89341b6 100644 (file)
@@ -643,7 +643,7 @@ static void shmem_evict_inode(struct inode *inode)
                kfree(info->symlink);
 
        simple_xattrs_free(&info->xattrs);
-       BUG_ON(inode->i_blocks);
+       WARN_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        clear_inode(inode);
 }
@@ -1145,8 +1145,20 @@ repeat:
                if (!error) {
                        error = shmem_add_to_page_cache(page, mapping, index,
                                                gfp, swp_to_radix_entry(swap));
-                       /* We already confirmed swap, and make no allocation */
-                       VM_BUG_ON(error);
+                       /*
+                        * We already confirmed swap under page lock, and make
+                        * no memory allocation here, so usually no possibility
+                        * of error; but free_swap_and_cache() only trylocks a
+                        * page, so it is just possible that the entry has been
+                        * truncated or holepunched since swap was confirmed.
+                        * shmem_undo_range() will have done some of the
+                        * unaccounting, now delete_from_swap_cache() will do
+                        * the rest (including mem_cgroup_uncharge_swapcache).
+                        * Reset swap.val? No, leave it so "failed" goes back to
+                        * "repeat": reading a hole and writing should succeed.
+                        */
+                       if (error)
+                               delete_from_swap_cache(page);
                }
                if (error)
                        goto failed;
index 71cd288..f91a255 100644 (file)
@@ -1494,9 +1494,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        BUG_ON(!current->mm);
 
        pathname = getname(specialfile);
-       err = PTR_ERR(pathname);
        if (IS_ERR(pathname))
-               goto out;
+               return PTR_ERR(pathname);
 
        victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
        err = PTR_ERR(victim);
@@ -1608,6 +1607,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 out_dput:
        filp_close(victim, NULL);
 out:
+       putname(pathname);
        return err;
 }
 
index 2624edc..48550c6 100644 (file)
@@ -1760,28 +1760,6 @@ static bool in_reclaim_compaction(struct scan_control *sc)
        return false;
 }
 
-#ifdef CONFIG_COMPACTION
-/*
- * If compaction is deferred for sc->order then scale the number of pages
- * reclaimed based on the number of consecutive allocation failures
- */
-static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
-                       struct lruvec *lruvec, struct scan_control *sc)
-{
-       struct zone *zone = lruvec_zone(lruvec);
-
-       if (zone->compact_order_failed <= sc->order)
-               pages_for_compaction <<= zone->compact_defer_shift;
-       return pages_for_compaction;
-}
-#else
-static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
-                       struct lruvec *lruvec, struct scan_control *sc)
-{
-       return pages_for_compaction;
-}
-#endif
-
 /*
  * Reclaim/compaction is used for high-order allocation requests. It reclaims
  * order-0 pages before compacting the zone. should_continue_reclaim() returns
@@ -1829,9 +1807,6 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec,
         * inactive lists are large enough, continue reclaiming
         */
        pages_for_compaction = (2UL << sc->order);
-
-       pages_for_compaction = scale_for_compaction(pages_for_compaction,
-                                                   lruvec, sc);
        inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
        if (nr_swap_pages > 0)
                inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
@@ -3017,6 +2992,8 @@ static int kswapd(void *p)
                                                &balanced_classzone_idx);
                }
        }
+
+       current->reclaim_state = NULL;
        return 0;
 }
 
index b9a28d2..ce0684a 100644 (file)
@@ -325,6 +325,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
 
        soft_iface->last_rx = jiffies;
 
+       /* Let the bridge loop avoidance check the packet. If it will
+        * not handle it, we can safely push it up.
+        */
+       if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
+               goto out;
+
        if (orig_node)
                batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
                                                     ethhdr->h_source);
@@ -332,12 +338,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
        if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
                goto dropped;
 
-       /* Let the bridge loop avoidance check the packet. If will
-        * not handle it, we can safely push it up.
-        */
-       if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
-               goto out;
-
        netif_rx(skb);
        goto out;
 
index 112edd3..baae715 100644 (file)
@@ -769,6 +769,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
                 */
                tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
 
+               /* the change can carry possible "attribute" flags like the
+                * TT_CLIENT_WIFI, therefore they have to be copied in the
+                * client entry
+                */
+               tt_global_entry->common.flags |= flags;
+
                /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
                 * one originator left in the list and we previously received a
                 * delete + roaming change for this originator.
@@ -1496,7 +1502,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
 
                        memcpy(tt_change->addr, tt_common_entry->addr,
                               ETH_ALEN);
-                       tt_change->flags = BATADV_NO_FLAGS;
+                       tt_change->flags = tt_common_entry->flags;
 
                        tt_count++;
                        tt_change++;
@@ -2450,6 +2456,13 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 {
        bool ret = false;
 
+       /* if the originator is a backbone node (meaning it belongs to the same
+        * LAN of this node) the temporary client must not be added because to
+        * reach such destination the node must use the LAN instead of the mesh
+        */
+       if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
+               goto out;
+
        if (!batadv_tt_global_add(bat_priv, orig_node, addr,
                                  BATADV_TT_CLIENT_TEMP,
                                  atomic_read(&orig_node->last_ttvn)))
index 8a0ce70..a0a2f97 100644 (file)
@@ -1754,11 +1754,11 @@ int hci_register_dev(struct hci_dev *hdev)
        if (hdev->dev_type != HCI_AMP)
                set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
 
-       schedule_work(&hdev->power_on);
-
        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);
 
+       schedule_work(&hdev->power_on);
+
        return id;
 
 err_wqueue:
index aa2ea0a..91de423 100644 (file)
@@ -326,7 +326,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
        struct hci_dev *d;
        size_t rp_len;
        u16 count;
-       int i, err;
+       int err;
 
        BT_DBG("sock %p", sk);
 
@@ -347,9 +347,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
                return -ENOMEM;
        }
 
-       rp->num_controllers = cpu_to_le16(count);
-
-       i = 0;
+       count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
                if (test_bit(HCI_SETUP, &d->dev_flags))
                        continue;
@@ -357,10 +355,13 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
                if (!mgmt_valid_hdev(d))
                        continue;
 
-               rp->index[i++] = cpu_to_le16(d->id);
+               rp->index[count++] = cpu_to_le16(d->id);
                BT_DBG("Added hci%u", d->id);
        }
 
+       rp->num_controllers = cpu_to_le16(count);
+       rp_len = sizeof(*rp) + (2 * count);
+
        read_unlock(&hci_dev_list_lock);
 
        err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
@@ -1366,6 +1367,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
                        continue;
 
                list_del(&match->list);
+               kfree(match);
                found++;
        }
 
index 2ac8d50..a592337 100644 (file)
@@ -267,7 +267,7 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
 
        clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
        mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
-                        hcon->dst_type, reason);
+                        hcon->dst_type, HCI_ERROR_AUTH_FAILURE);
 
        cancel_delayed_work_sync(&conn->security_timer);
 
index 09cb3f6..c0946cb 100644 (file)
@@ -1666,7 +1666,7 @@ static inline int deliver_skb(struct sk_buff *skb,
 
 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
 {
-       if (ptype->af_packet_priv == NULL)
+       if (!ptype->af_packet_priv || !skb->sk)
                return false;
 
        if (ptype->id_match)
@@ -2818,8 +2818,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                if (unlikely(tcpu != next_cpu) &&
                    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
                     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
-                     rflow->last_qtail)) >= 0))
+                     rflow->last_qtail)) >= 0)) {
+                       tcpu = next_cpu;
                        rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+               }
 
                if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
                        *rflowp = rflow;
index 87cc17d..b079c7b 100644 (file)
@@ -319,7 +319,8 @@ int dev_addr_del(struct net_device *dev, const unsigned char *addr,
         */
        ha = list_first_entry(&dev->dev_addrs.list,
                              struct netdev_hw_addr, list);
-       if (ha->addr == dev->dev_addr && ha->refcount == 1)
+       if (!memcmp(ha->addr, addr, dev->addr_len) &&
+           ha->type == addr_type && ha->refcount == 1)
                return -ENOENT;
 
        err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
index bcf02f6..017a8ba 100644 (file)
@@ -429,6 +429,17 @@ static struct attribute_group netstat_group = {
        .name  = "statistics",
        .attrs  = netstat_attrs,
 };
+
+#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
+static struct attribute *wireless_attrs[] = {
+       NULL
+};
+
+static struct attribute_group wireless_group = {
+       .name = "wireless",
+       .attrs = wireless_attrs,
+};
+#endif
 #endif /* CONFIG_SYSFS */
 
 #ifdef CONFIG_RPS
@@ -1409,6 +1420,15 @@ int netdev_register_kobject(struct net_device *net)
                groups++;
 
        *groups++ = &netstat_group;
+
+#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
+       if (net->ieee80211_ptr)
+               *groups++ = &wireless_group;
+#if IS_ENABLED(CONFIG_WIRELESS_EXT)
+       else if (net->wireless_handlers)
+               *groups++ = &wireless_group;
+#endif
+#endif
 #endif /* CONFIG_SYSFS */
 
        error = device_add(dev);
index 76d4c2c..fad649a 100644 (file)
@@ -2192,7 +2192,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
                        goto skip;
 
                err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
-                                             portid, seq, 0, NTF_SELF);
+                                             portid, seq,
+                                             RTM_NEWNEIGH, NTF_SELF);
                if (err < 0)
                        return err;
 skip:
index 535584c..0c34bfa 100644 (file)
@@ -892,13 +892,16 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                struct inet_diag_req_v2 *r, struct nlattr *bc)
 {
        const struct inet_diag_handler *handler;
+       int err = 0;
 
        handler = inet_diag_lock_handler(r->sdiag_protocol);
        if (!IS_ERR(handler))
                handler->dump(skb, cb, r, bc);
+       else
+               err = PTR_ERR(handler);
        inet_diag_unlock_handler(handler);
 
-       return skb->len;
+       return err ? : skb->len;
 }
 
 static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
index 5eea4a8..14bbfcf 100644 (file)
@@ -457,19 +457,28 @@ static int do_ip_setsockopt(struct sock *sk, int level,
        struct inet_sock *inet = inet_sk(sk);
        int val = 0, err;
 
-       if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
-                            (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
-                            (1<<IP_RETOPTS) | (1<<IP_TOS) |
-                            (1<<IP_TTL) | (1<<IP_HDRINCL) |
-                            (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
-                            (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
-                            (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
-                            (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
-           optname == IP_UNICAST_IF ||
-           optname == IP_MULTICAST_TTL ||
-           optname == IP_MULTICAST_ALL ||
-           optname == IP_MULTICAST_LOOP ||
-           optname == IP_RECVORIGDSTADDR) {
+       switch (optname) {
+       case IP_PKTINFO:
+       case IP_RECVTTL:
+       case IP_RECVOPTS:
+       case IP_RECVTOS:
+       case IP_RETOPTS:
+       case IP_TOS:
+       case IP_TTL:
+       case IP_HDRINCL:
+       case IP_MTU_DISCOVER:
+       case IP_RECVERR:
+       case IP_ROUTER_ALERT:
+       case IP_FREEBIND:
+       case IP_PASSSEC:
+       case IP_TRANSPARENT:
+       case IP_MINTTL:
+       case IP_NODEFRAG:
+       case IP_UNICAST_IF:
+       case IP_MULTICAST_TTL:
+       case IP_MULTICAST_ALL:
+       case IP_MULTICAST_LOOP:
+       case IP_RECVORIGDSTADDR:
                if (optlen >= sizeof(int)) {
                        if (get_user(val, (int __user *) optval))
                                return -EFAULT;
index 1831092..858fddf 100644 (file)
@@ -338,12 +338,17 @@ static int vti_rcv(struct sk_buff *skb)
        if (tunnel != NULL) {
                struct pcpu_tstats *tstats;
 
+               if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+                       return -1;
+
                tstats = this_cpu_ptr(tunnel->dev->tstats);
                u64_stats_update_begin(&tstats->syncp);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
                u64_stats_update_end(&tstats->syncp);
 
+               skb->mark = 0;
+               secpath_reset(skb);
                skb->dev = tunnel->dev;
                return 1;
        }
index a8c6512..df25142 100644 (file)
@@ -1785,6 +1785,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
        if (dev_out->flags & IFF_LOOPBACK)
                flags |= RTCF_LOCAL;
 
+       do_cache = true;
        if (type == RTN_BROADCAST) {
                flags |= RTCF_BROADCAST | RTCF_LOCAL;
                fi = NULL;
@@ -1793,6 +1794,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
                if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
                                     fl4->flowi4_proto))
                        flags &= ~RTCF_LOCAL;
+               else
+                       do_cache = false;
                /* If multicast route do not exist use
                 * default one, but do not gateway in this case.
                 * Yes, it is hack.
@@ -1802,8 +1805,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
        }
 
        fnhe = NULL;
-       do_cache = fi != NULL;
-       if (fi) {
+       do_cache &= fi != NULL;
+       if (do_cache) {
                struct rtable __rcu **prth;
                struct fib_nh *nh = &FIB_RES_NH(*res);
 
@@ -2597,7 +2600,7 @@ int __init ip_rt_init(void)
                pr_err("Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
        xfrm_init();
-       xfrm4_init(ip_rt_max_size);
+       xfrm4_init();
 #endif
        rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
 
index 197c000..083092e 100644 (file)
@@ -1212,7 +1212,7 @@ new_segment:
 wait_for_sndbuf:
                        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
-                       if (copied && likely(!tp->repair))
+                       if (copied)
                                tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
                        if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
@@ -1223,7 +1223,7 @@ wait_for_memory:
        }
 
 out:
-       if (copied && likely(!tp->repair))
+       if (copied)
                tcp_push(sk, flags, mss_now, tp->nonagle);
        release_sock(sk);
        return copied + copied_syn;
index 2c2b13a..609ff98 100644 (file)
@@ -5313,11 +5313,6 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
                goto discard;
        }
 
-       /* ts_recent update must be made after we are sure that the packet
-        * is in window.
-        */
-       tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
        /* step 3: check security and precedence [ignored] */
 
        /* step 4: Check for a SYN
@@ -5552,6 +5547,11 @@ step5:
        if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
                goto discard;
 
+       /* ts_recent update must be made after we are sure that the packet
+        * is in window.
+        */
+       tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+
        tcp_rcv_rtt_measure_ts(sk, skb);
 
        /* Process urgent data. */
@@ -6130,6 +6130,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        } else
                goto discard;
 
+       /* ts_recent update must be made after we are sure that the packet
+        * is in window.
+        */
+       tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+
        /* step 6: check the URG bit */
        tcp_urg(sk, skb, th);
 
index 53bc584..f696d7c 100644 (file)
@@ -1,7 +1,6 @@
 #include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
-#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/cache.h>
 #include <linux/slab.h>
@@ -9,6 +8,7 @@
 #include <linux/tcp.h>
 #include <linux/hash.h>
 #include <linux/tcp_metrics.h>
+#include <linux/vmalloc.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/net_namespace.h>
@@ -1034,7 +1034,10 @@ static int __net_init tcp_net_metrics_init(struct net *net)
        net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
        size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;
 
-       net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
+       net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+       if (!net->ipv4.tcp_metrics_hash)
+               net->ipv4.tcp_metrics_hash = vzalloc(size);
+
        if (!net->ipv4.tcp_metrics_hash)
                return -ENOMEM;
 
@@ -1055,7 +1058,10 @@ static void __net_exit tcp_net_metrics_exit(struct net *net)
                        tm = next;
                }
        }
-       kfree(net->ipv4.tcp_metrics_hash);
+       if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
+               vfree(net->ipv4.tcp_metrics_hash);
+       else
+               kfree(net->ipv4.tcp_metrics_hash);
 }
 
 static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
index cfe6ffe..2798706 100644 (file)
@@ -1986,6 +1986,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
                BUG_ON(!tso_segs);
 
+               if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
+                       goto repair; /* Skip network transmission */
+
                cwnd_quota = tcp_cwnd_test(tp, skb);
                if (!cwnd_quota)
                        break;
@@ -2026,6 +2029,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
                        break;
 
+repair:
                /* Advance the send_head.  This one is sent out.
                 * This call will increment packets_out.
                 */
index 05c5ab8..3be0ac2 100644 (file)
@@ -279,19 +279,8 @@ static void __exit xfrm4_policy_fini(void)
        xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
 }
 
-void __init xfrm4_init(int rt_max_size)
+void __init xfrm4_init(void)
 {
-       /*
-        * Select a default value for the gc_thresh based on the main route
-        * table hash size.  It seems to me the worst case scenario is when
-        * we have ipsec operating in transport mode, in which we create a
-        * dst_entry per socket.  The xfrm gc algorithm starts trying to remove
-        * entries at gc_thresh, and prevents new allocations as 2*gc_thresh
-        * so lets set an initial xfrm gc_thresh value at the rt_max_size/2.
-        * That will let us store an ipsec connection per route table entry,
-        * and start cleaning when were 1/2 full
-        */
-       xfrm4_dst_ops.gc_thresh = rt_max_size/2;
        dst_entries_init(&xfrm4_dst_ops);
 
        xfrm4_state_init();
index c4f9341..3064785 100644 (file)
@@ -252,6 +252,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
                return NULL;
        dst->ops->update_pmtu(dst, sk, NULL, mtu);
 
-       return inet6_csk_route_socket(sk, &fl6);
+       dst = inet6_csk_route_socket(sk, &fl6);
+       return IS_ERR(dst) ? NULL : dst;
 }
 EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);
index 0185679..d5cb3c4 100644 (file)
@@ -1633,9 +1633,9 @@ static size_t ip6gre_get_size(const struct net_device *dev)
                /* IFLA_GRE_OKEY */
                nla_total_size(4) +
                /* IFLA_GRE_LOCAL */
-               nla_total_size(4) +
+               nla_total_size(sizeof(struct in6_addr)) +
                /* IFLA_GRE_REMOTE */
-               nla_total_size(4) +
+               nla_total_size(sizeof(struct in6_addr)) +
                /* IFLA_GRE_TTL */
                nla_total_size(1) +
                /* IFLA_GRE_TOS */
@@ -1659,8 +1659,8 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
            nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
-           nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->raddr) ||
-           nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->laddr) ||
+           nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) ||
+           nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) ||
            nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
            /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
            nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
index ba6d13d..e02faed 100644 (file)
@@ -827,6 +827,7 @@ pref_skip_coa:
                if (val < 0 || val > 255)
                        goto e_inval;
                np->min_hopcount = val;
+               retv = 0;
                break;
        case IPV6_DONTFRAG:
                np->dontfrag = valbool;
index ff36194..2edce30 100644 (file)
@@ -535,7 +535,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
 {
        struct inet6_dev *idev;
        struct inet6_ifaddr *ifa;
-       struct in6_addr mcaddr;
+       struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
 
        idev = in6_dev_get(dev);
        if (!idev)
@@ -543,7 +543,6 @@ static void ndisc_send_unsol_na(struct net_device *dev)
 
        read_lock_bh(&idev->lock);
        list_for_each_entry(ifa, &idev->addr_list, if_list) {
-               addrconf_addr_solict_mult(&ifa->addr, &mcaddr);
                ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr,
                              /*router=*/ !!idev->cnf.forwarding,
                              /*solicited=*/ false, /*override=*/ true,
index 05f3a31..7371f67 100644 (file)
@@ -2594,6 +2594,9 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
                else
                        local->probe_req_reg--;
 
+               if (!local->open_count)
+                       break;
+
                ieee80211_queue_work(&local->hw, &local->reconfig_filter);
                break;
        default:
index bf87c70..c21e33d 100644 (file)
@@ -1151,10 +1151,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
        mutex_lock(&sdata->u.ibss.mtx);
 
-       sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
-       memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
-       sdata->u.ibss.ssid_len = 0;
-
        active_ibss = ieee80211_sta_active_ibss(sdata);
 
        if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
@@ -1175,6 +1171,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
                }
        }
 
+       ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
+       memset(ifibss->bssid, 0, ETH_ALEN);
+       ifibss->ssid_len = 0;
+
        sta_info_flush(sdata->local, sdata);
 
        spin_lock_bh(&ifibss->incomplete_lock);
index 8c80455..156e583 100644 (file)
@@ -1314,6 +1314,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
                                         struct net_device *dev);
 netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                                       struct net_device *dev);
+void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
+                             struct sk_buff_head *skbs);
 
 /* HT */
 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
index c80c449..f57f597 100644 (file)
@@ -871,8 +871,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                                local->hw.wiphy->cipher_suites,
                                sizeof(u32) * local->hw.wiphy->n_cipher_suites,
                                GFP_KERNEL);
-                       if (!suites)
-                               return -ENOMEM;
+                       if (!suites) {
+                               result = -ENOMEM;
+                               goto fail_wiphy_register;
+                       }
                        for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
                                u32 suite = local->hw.wiphy->cipher_suites[r];
                                if (suite == WLAN_CIPHER_SUITE_WEP40 ||
index c4cdbde..43e60b5 100644 (file)
@@ -917,7 +917,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
                                       struct cfg80211_sched_scan_request *req)
 {
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_sched_scan_ies sched_scan_ies;
+       struct ieee80211_sched_scan_ies sched_scan_ies = {};
        int ret, i;
 
        mutex_lock(&local->mtx);
index 0a4e4c0..d2eb64e 100644 (file)
@@ -117,8 +117,8 @@ static void free_sta_work(struct work_struct *wk)
 
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
-               __skb_queue_purge(&sta->ps_tx_buf[ac]);
-               __skb_queue_purge(&sta->tx_filtered[ac]);
+               ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
+               ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
        }
 
 #ifdef CONFIG_MAC80211_MESH
@@ -141,7 +141,7 @@ static void free_sta_work(struct work_struct *wk)
                tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
                if (!tid_tx)
                        continue;
-               __skb_queue_purge(&tid_tx->pending);
+               ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
                kfree(tid_tx);
        }
 
@@ -961,6 +961,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        struct ieee80211_local *local = sdata->local;
        struct sk_buff_head pending;
        int filtered = 0, buffered = 0, ac;
+       unsigned long flags;
 
        clear_sta_flag(sta, WLAN_STA_SP);
 
@@ -976,12 +977,16 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                int count = skb_queue_len(&pending), tmp;
 
+               spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
                skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
+               spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
                tmp = skb_queue_len(&pending);
                filtered += tmp - count;
                count = tmp;
 
+               spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
                skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
+               spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
                tmp = skb_queue_len(&pending);
                buffered += tmp - count;
        }
index 3af0cc4..101eb88 100644 (file)
@@ -668,3 +668,12 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
        dev_kfree_skb_any(skb);
 }
 EXPORT_SYMBOL(ieee80211_free_txskb);
+
+void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
+                             struct sk_buff_head *skbs)
+{
+       struct sk_buff *skb;
+
+       while ((skb = __skb_dequeue(skbs)))
+               ieee80211_free_txskb(hw, skb);
+}
index c9bf83f..b858ebe 100644 (file)
@@ -1358,7 +1358,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
                if (tx->skb)
                        ieee80211_free_txskb(&tx->local->hw, tx->skb);
                else
-                       __skb_queue_purge(&tx->skbs);
+                       ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
                return -1;
        } else if (unlikely(res == TX_QUEUED)) {
                I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -2120,10 +2120,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
  */
 void ieee80211_clear_tx_pending(struct ieee80211_local *local)
 {
+       struct sk_buff *skb;
        int i;
 
-       for (i = 0; i < local->hw.queues; i++)
-               skb_queue_purge(&local->pending[i]);
+       for (i = 0; i < local->hw.queues; i++) {
+               while ((skb = skb_dequeue(&local->pending[i])) != NULL)
+                       ieee80211_free_txskb(&local->hw, skb);
+       }
 }
 
 /*
index 2393918..0151ae3 100644 (file)
@@ -1491,6 +1491,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                list_for_each_entry(sdata, &local->interfaces, list) {
                        if (sdata->vif.type != NL80211_IFTYPE_STATION)
                                continue;
+                       if (!sdata->u.mgd.associated)
+                               continue;
 
                        ieee80211_send_nullfunc(local, sdata, 0);
                }
index ec3dba5..5c0b785 100644 (file)
@@ -173,6 +173,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
                return adtfn(set, &nip, timeout, flags);
        }
 
+       ip_to = ip;
        if (tb[IPSET_ATTR_IP_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
@@ -185,8 +186,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
-       } else
-               ip_to = ip;
+       }
 
        hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
index 0171f75..6283351 100644 (file)
@@ -162,7 +162,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport4_elem data = { };
-       u32 ip, ip_to = 0, p = 0, port, port_to;
+       u32 ip, ip_to, p = 0, port, port_to;
        u32 timeout = h->timeout;
        bool with_ports = false;
        int ret;
@@ -210,7 +210,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
                return ip_set_eexist(ret, flags) ? 0 : ret;
        }
 
-       ip = ntohl(data.ip);
+       ip_to = ip = ntohl(data.ip);
        if (tb[IPSET_ATTR_IP_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
@@ -223,8 +223,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
-       } else
-               ip_to = ip;
+       }
 
        port_to = port = ntohs(data.port);
        if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
index 6344ef5..6a21271 100644 (file)
@@ -166,7 +166,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip4_elem data = { };
-       u32 ip, ip_to = 0, p = 0, port, port_to;
+       u32 ip, ip_to, p = 0, port, port_to;
        u32 timeout = h->timeout;
        bool with_ports = false;
        int ret;
@@ -218,7 +218,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
                return ip_set_eexist(ret, flags) ? 0 : ret;
        }
 
-       ip = ntohl(data.ip);
+       ip_to = ip = ntohl(data.ip);
        if (tb[IPSET_ATTR_IP_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
@@ -231,8 +231,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
-       } else
-               ip_to = ip;
+       }
 
        port_to = port = ntohs(data.port);
        if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
index cb71f9a..2d5cd4e 100644 (file)
@@ -215,8 +215,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 };
-       u32 ip, ip_to = 0, p = 0, port, port_to;
-       u32 ip2_from = 0, ip2_to, ip2_last, ip2;
+       u32 ip, ip_to, p = 0, port, port_to;
+       u32 ip2_from, ip2_to, ip2_last, ip2;
        u32 timeout = h->timeout;
        bool with_ports = false;
        u8 cidr;
@@ -286,6 +286,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                return ip_set_eexist(ret, flags) ? 0 : ret;
        }
 
+       ip_to = ip;
        if (tb[IPSET_ATTR_IP_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
@@ -306,6 +307,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (port > port_to)
                        swap(port, port_to);
        }
+
+       ip2_to = ip2_from;
        if (tb[IPSET_ATTR_IP2_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
                if (ret)
index 8847b4d..701c88a 100644 (file)
@@ -41,7 +41,8 @@ MODULE_DESCRIPTION("cttimeout: Extended Netfilter Connection Tracking timeout tu
 static LIST_HEAD(cttimeout_list);
 
 static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
-       [CTA_TIMEOUT_NAME]      = { .type = NLA_NUL_STRING },
+       [CTA_TIMEOUT_NAME]      = { .type = NLA_NUL_STRING,
+                                   .len  = CTNL_TIMEOUT_NAME_MAX - 1},
        [CTA_TIMEOUT_L3PROTO]   = { .type = NLA_U16 },
        [CTA_TIMEOUT_L4PROTO]   = { .type = NLA_U8 },
        [CTA_TIMEOUT_DATA]      = { .type = NLA_NESTED },
index cc10d07..9e8f4b2 100644 (file)
@@ -1210,7 +1210,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
        local->remote_miu = LLCP_DEFAULT_MIU;
        local->remote_lto = LLCP_DEFAULT_LTO;
 
-       list_add(&llcp_devices, &local->list);
+       list_add(&local->list, &llcp_devices);
 
        return 0;
 }
index f0dd83c..9687fa1 100644 (file)
  * grp->index is the index of the group; and grp->slot_shift
  * is the shift for the corresponding (scaled) sigma_i.
  */
-#define QFQ_MAX_INDEX          19
-#define QFQ_MAX_WSHIFT         16
+#define QFQ_MAX_INDEX          24
+#define QFQ_MAX_WSHIFT         12
 
 #define        QFQ_MAX_WEIGHT          (1<<QFQ_MAX_WSHIFT)
-#define QFQ_MAX_WSUM           (2*QFQ_MAX_WEIGHT)
+#define QFQ_MAX_WSUM           (16*QFQ_MAX_WEIGHT)
 
 #define FRAC_BITS              30      /* fixed point arithmetic */
 #define ONE_FP                 (1UL << FRAC_BITS)
 #define IWSUM                  (ONE_FP/QFQ_MAX_WSUM)
 
-#define QFQ_MTU_SHIFT          11
+#define QFQ_MTU_SHIFT          16      /* to support TSO/GSO */
 #define QFQ_MIN_SLOT_SHIFT     (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
+#define QFQ_MIN_LMAX           256     /* min possible lmax for a class */
 
 /*
  * Possible group states.  These values are used as indexes for the bitmaps
@@ -231,6 +232,32 @@ static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
        q->wsum += delta_w;
 }
 
+static void qfq_update_reactivate_class(struct qfq_sched *q,
+                                       struct qfq_class *cl,
+                                       u32 inv_w, u32 lmax, int delta_w)
+{
+       bool need_reactivation = false;
+       int i = qfq_calc_index(inv_w, lmax);
+
+       if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
+               /*
+                * shift cl->F back, to not charge the
+                * class for the not-yet-served head
+                * packet
+                */
+               cl->F = cl->S;
+               /* remove class from its slot in the old group */
+               qfq_deactivate_class(q, cl);
+               need_reactivation = true;
+       }
+
+       qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+
+       if (need_reactivation) /* activate in new group */
+               qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+}
+
+
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                            struct nlattr **tca, unsigned long *arg)
 {
@@ -238,7 +265,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        struct qfq_class *cl = (struct qfq_class *)*arg;
        struct nlattr *tb[TCA_QFQ_MAX + 1];
        u32 weight, lmax, inv_w;
-       int i, err;
+       int err;
        int delta_w;
 
        if (tca[TCA_OPTIONS] == NULL) {
@@ -270,16 +297,14 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
        if (tb[TCA_QFQ_LMAX]) {
                lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
-               if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) {
+               if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
                        pr_notice("qfq: invalid max length %u\n", lmax);
                        return -EINVAL;
                }
        } else
-               lmax = 1UL << QFQ_MTU_SHIFT;
+               lmax = psched_mtu(qdisc_dev(sch));
 
        if (cl != NULL) {
-               bool need_reactivation = false;
-
                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                                    qdisc_root_sleeping_lock(sch),
@@ -291,24 +316,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                if (lmax == cl->lmax && inv_w == cl->inv_w)
                        return 0; /* nothing to update */
 
-               i = qfq_calc_index(inv_w, lmax);
                sch_tree_lock(sch);
-               if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
-                       /*
-                        * shift cl->F back, to not charge the
-                        * class for the not-yet-served head
-                        * packet
-                        */
-                       cl->F = cl->S;
-                       /* remove class from its slot in the old group */
-                       qfq_deactivate_class(q, cl);
-                       need_reactivation = true;
-               }
-
-               qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
-
-               if (need_reactivation) /* activate in new group */
-                       qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+               qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w);
                sch_tree_unlock(sch);
 
                return 0;
@@ -663,15 +672,48 @@ static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
 
 
 /*
- * XXX we should make sure that slot becomes less than 32.
- * This is guaranteed by the input values.
- * roundedS is always cl->S rounded on grp->slot_shift bits.
+ * If the weight and lmax (max_pkt_size) of the classes do not change,
+ * then QFQ guarantees that the slot index is never higher than
+ * 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM).
+ *
+ * With the current values of the above constants, the index is
+ * then guaranteed to never be higher than 2 + 256 * (1 / 16) = 18.
+ *
+ * When the weight of a class is increased or the lmax of the class is
+ * decreased, a new class with smaller slot size may happen to be
+ * activated. The activation of this class should be properly delayed
+ * to when the service of the class has finished in the ideal system
+ * tracked by QFQ. If the activation of the class is not delayed to
+ * this reference time instant, then this class may be unjustly served
+ * before other classes waiting for service. This may cause
+ * (unfrequently) the above bound to the slot index to be violated for
+ * some of these unlucky classes.
+ *
+ * Instead of delaying the activation of the new class, which is quite
+ * complex, the following inaccurate but simple solution is used: if
+ * the slot index is higher than QFQ_MAX_SLOTS-2, then the timestamps
+ * of the class are shifted backward so as to let the slot index
+ * become equal to QFQ_MAX_SLOTS-2. This threshold is used because, if
+ * the slot index is above it, then the data structure implementing
+ * the bucket list either gets immediately corrupted or may get
+ * corrupted on a possible next packet arrival that causes the start
+ * time of the group to be shifted backward.
  */
 static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
                            u64 roundedS)
 {
        u64 slot = (roundedS - grp->S) >> grp->slot_shift;
-       unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;
+       unsigned int i; /* slot index in the bucket list */
+
+       if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
+               u64 deltaS = roundedS - grp->S -
+                       ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
+               cl->S -= deltaS;
+               cl->F -= deltaS;
+               slot = QFQ_MAX_SLOTS - 2;
+       }
+
+       i = (grp->front + slot) % QFQ_MAX_SLOTS;
 
        hlist_add_head(&cl->next, &grp->slots[i]);
        __set_bit(slot, &grp->full_slots);
@@ -892,6 +934,13 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        }
        pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
 
+       if (unlikely(cl->lmax < qdisc_pkt_len(skb))) {
+               pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
+                         cl->lmax, qdisc_pkt_len(skb), cl->common.classid);
+               qfq_update_reactivate_class(q, cl, cl->inv_w,
+                                           qdisc_pkt_len(skb), 0);
+       }
+
        err = qdisc_enqueue(skb, cl->qdisc);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                pr_debug("qfq_enqueue: enqueue failed %d\n", err);
index c3bea26..9966e7b 100644 (file)
@@ -102,7 +102,7 @@ static const struct file_operations sctp_snmp_seq_fops = {
        .open    = sctp_snmp_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
-       .release = single_release,
+       .release = single_release_net,
 };
 
 /* Set up the proc fs entry for 'snmp' object. */
@@ -251,7 +251,7 @@ static const struct file_operations sctp_eps_seq_fops = {
        .open    = sctp_eps_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
-       .release = seq_release,
+       .release = seq_release_net,
 };
 
 /* Set up the proc fs entry for 'eps' object. */
@@ -372,7 +372,7 @@ static const struct file_operations sctp_assocs_seq_fops = {
        .open    = sctp_assocs_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
-       .release = seq_release,
+       .release = seq_release_net,
 };
 
 /* Set up the proc fs entry for 'assocs' object. */
@@ -517,7 +517,7 @@ static const struct file_operations sctp_remaddr_seq_fops = {
        .open = sctp_remaddr_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
-       .release = seq_release,
+       .release = seq_release_net,
 };
 
 int __net_init sctp_remaddr_proc_init(struct net *net)
index 111ff83..b36f0fc 100644 (file)
@@ -116,7 +116,6 @@ void tipc_handler_stop(void)
                return;
 
        handler_enabled = 0;
-       tasklet_disable(&tipc_tasklet);
        tasklet_kill(&tipc_tasklet);
 
        spin_lock_bh(&qitem_lock);
index bcc7d7e..b75756b 100644 (file)
@@ -141,9 +141,8 @@ static const struct ieee80211_regdomain world_regdom = {
        .reg_rules = {
                /* IEEE 802.11b/g, channels 1..11 */
                REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
-               /* IEEE 802.11b/g, channels 12..13. No HT40
-                * channel fits here. */
-               REG_RULE(2467-10, 2472+10, 20, 6, 20,
+               /* IEEE 802.11b/g, channels 12..13. */
+               REG_RULE(2467-10, 2472+10, 40, 6, 20,
                        NL80211_RRF_PASSIVE_SCAN |
                        NL80211_RRF_NO_IBSS),
                /* IEEE 802.11 channel 14 - Only JP enables
index dda4b2b..ecbb447 100644 (file)
@@ -16,8 +16,9 @@ PHONY += $(modules)
 __modinst: $(modules)
        @:
 
+# Don't stop modules_install if we can't sign external modules.
 quiet_cmd_modules_install = INSTALL $@
-      cmd_modules_install = mkdir -p $(2); cp $@ $(2) ; $(mod_strip_cmd) $(2)/$(notdir $@) ; $(mod_sign_cmd) $(2)/$(notdir $@)
+      cmd_modules_install = mkdir -p $(2); cp $@ $(2) ; $(mod_strip_cmd) $(2)/$(notdir $@) ; $(mod_sign_cmd) $(2)/$(notdir $@) $(patsubst %,|| true,$(KBUILD_EXTMOD))
 
 # Modules built outside the kernel source tree go into extra by default
 INSTALL_MOD_DIR ?= extra
index 21a9f5d..f18750e 100755 (executable)
@@ -1890,8 +1890,10 @@ sub process {
                }
 
                if ($realfile =~ m@^(drivers/net/|net/)@ &&
-                   $rawline !~ m@^\+[ \t]*(\/\*|\*\/)@ &&
-                   $rawline =~ m@^\+[ \t]*.+\*\/[ \t]*$@) {
+                   $rawline !~ m@^\+[ \t]*\*/[ \t]*$@ &&       #trailing */
+                   $rawline !~ m@^\+.*/\*.*\*/[ \t]*$@ &&      #inline /*...*/
+                   $rawline !~ m@^\+.*\*{2,}/[ \t]*$@ &&       #trailing **/
+                   $rawline =~ m@^\+[ \t]*.+\*\/[ \t]*$@) {    #non blank */
                        WARN("NETWORKING_BLOCK_COMMENT_STYLE",
                             "networking block comments put the trailing */ on a separate line\n" . $herecurr);
                }
index bd2e098..cdd4860 100644 (file)
@@ -12,7 +12,7 @@ extern "C" {
 
 #include <assert.h>
 #include <stdio.h>
-#include <sys/queue.h>
+#include "list.h"
 #ifndef __cplusplus
 #include <stdbool.h>
 #endif
@@ -175,12 +175,11 @@ struct menu {
 #define MENU_ROOT              0x0002
 
 struct jump_key {
-       CIRCLEQ_ENTRY(jump_key) entries;
+       struct list_head entries;
        size_t offset;
        struct menu *target;
        int index;
 };
-CIRCLEQ_HEAD(jk_head, jump_key);
 
 #define JUMP_NB                        9
 
diff --git a/scripts/kconfig/list.h b/scripts/kconfig/list.h
new file mode 100644 (file)
index 0000000..0ae730b
--- /dev/null
@@ -0,0 +1,91 @@
+#ifndef LIST_H
+#define LIST_H
+
+/*
+ * Copied from include/linux/...
+ */
+
+#undef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr:        the pointer to the member.
+ * @type:       the type of the container struct this is embedded in.
+ * @member:     the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({                      \
+       const typeof( ((type *)0)->member ) *__mptr = (ptr);    \
+       (type *)( (char *)__mptr - offsetof(type,member) );})
+
+
+struct list_head {
+       struct list_head *next, *prev;
+};
+
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+       struct list_head name = LIST_HEAD_INIT(name)
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr:       the &struct list_head pointer.
+ * @type:      the type of the struct this is embedded in.
+ * @member:    the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+       container_of(ptr, type, member)
+
+/**
+ * list_for_each_entry -       iterate over list of given type
+ * @pos:       the type * to use as a loop cursor.
+ * @head:      the head for your list.
+ * @member:    the name of the list_struct within the struct.
+ */
+#define list_for_each_entry(pos, head, member)                         \
+       for (pos = list_entry((head)->next, typeof(*pos), member);      \
+            &pos->member != (head);    \
+            pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static inline int list_empty(const struct list_head *head)
+{
+       return head->next == head;
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add(struct list_head *_new,
+                             struct list_head *prev,
+                             struct list_head *next)
+{
+       next->prev = _new;
+       _new->next = next;
+       _new->prev = prev;
+       prev->next = _new;
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *_new, struct list_head *head)
+{
+       __list_add(_new, head->prev, head);
+}
+
+#endif
index 1d1c085..ef1a738 100644 (file)
@@ -21,9 +21,9 @@ P(menu_get_root_menu,struct menu *,(struct menu *menu));
 P(menu_get_parent_menu,struct menu *,(struct menu *menu));
 P(menu_has_help,bool,(struct menu *menu));
 P(menu_get_help,const char *,(struct menu *menu));
-P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct jk_head
+P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct list_head
                         *head));
-P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct jk_head
+P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct list_head
                                   *head));
 P(menu_get_ext_help,void,(struct menu *menu, struct gstr *help));
 
index 48f6744..53975cf 100644 (file)
@@ -312,7 +312,7 @@ static void set_config_filename(const char *config_filename)
 
 
 struct search_data {
-       struct jk_head *head;
+       struct list_head *head;
        struct menu **targets;
        int *keys;
 };
@@ -323,7 +323,7 @@ static void update_text(char *buf, size_t start, size_t end, void *_data)
        struct jump_key *pos;
        int k = 0;
 
-       CIRCLEQ_FOREACH(pos, data->head, entries) {
+       list_for_each_entry(pos, data->head, entries) {
                if (pos->offset >= start && pos->offset < end) {
                        char header[4];
 
@@ -375,7 +375,7 @@ again:
 
        sym_arr = sym_re_search(dialog_input);
        do {
-               struct jk_head head = CIRCLEQ_HEAD_INITIALIZER(head);
+               LIST_HEAD(head);
                struct menu *targets[JUMP_NB];
                int keys[JUMP_NB + 1], i;
                struct search_data data = {
index a3cade6..e98a05c 100644 (file)
@@ -508,7 +508,7 @@ const char *menu_get_help(struct menu *menu)
 }
 
 static void get_prompt_str(struct gstr *r, struct property *prop,
-                          struct jk_head *head)
+                          struct list_head *head)
 {
        int i, j;
        struct menu *submenu[8], *menu, *location = NULL;
@@ -544,12 +544,13 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
                } else
                        jump->target = location;
 
-               if (CIRCLEQ_EMPTY(head))
+               if (list_empty(head))
                        jump->index = 0;
                else
-                       jump->index = CIRCLEQ_LAST(head)->index + 1;
+                       jump->index = list_entry(head->prev, struct jump_key,
+                                                entries)->index + 1;
 
-               CIRCLEQ_INSERT_TAIL(head, jump, entries);
+               list_add_tail(&jump->entries, head);
        }
 
        if (i > 0) {
@@ -573,7 +574,8 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
 /*
  * head is optional and may be NULL
  */
-void get_symbol_str(struct gstr *r, struct symbol *sym, struct jk_head *head)
+void get_symbol_str(struct gstr *r, struct symbol *sym,
+                   struct list_head *head)
 {
        bool hit;
        struct property *prop;
@@ -612,7 +614,7 @@ void get_symbol_str(struct gstr *r, struct symbol *sym, struct jk_head *head)
        str_append(r, "\n\n");
 }
 
-struct gstr get_relations_str(struct symbol **sym_arr, struct jk_head *head)
+struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head)
 {
        struct symbol *sym;
        struct gstr res = str_new();
index 87ca59d..974a20b 100755 (executable)
@@ -156,12 +156,12 @@ sub asn1_extract($$@)
 
        if ($l == 0x1) {
            $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1));
-       } elsif ($l = 0x2) {
+       } elsif ($l == 0x2) {
            $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0], 2));
-       } elsif ($l = 0x3) {
+       } elsif ($l == 0x3) {
            $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)) << 16;
            $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0] + 1, 2));
-       } elsif ($l = 0x4) {
+       } elsif ($l == 0x4) {
            $len = unpack("N", substr(${$cursor->[2]}, $cursor->[0], 4));
        } else {
            die $x509, ": ", $cursor->[0], ": ASN.1 element too long (", $l, ")\n";
index 842c254..b08d20c 100644 (file)
@@ -164,8 +164,8 @@ static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
        struct dev_exception_item *ex, *tmp;
 
        list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
-               list_del(&ex->list);
-               kfree(ex);
+               list_del_rcu(&ex->list);
+               kfree_rcu(ex, rcu);
        }
 }
 
@@ -298,7 +298,7 @@ static int may_access(struct dev_cgroup *dev_cgroup,
        struct dev_exception_item *ex;
        bool match = false;
 
-       list_for_each_entry(ex, &dev_cgroup->exceptions, list) {
+       list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
                if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
                        continue;
                if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
@@ -352,6 +352,8 @@ static int parent_has_perm(struct dev_cgroup *childcg,
  */
 static inline int may_allow_all(struct dev_cgroup *parent)
 {
+       if (!parent)
+               return 1;
        return parent->behavior == DEVCG_DEFAULT_ALLOW;
 }
 
@@ -376,11 +378,14 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
        int count, rc;
        struct dev_exception_item ex;
        struct cgroup *p = devcgroup->css.cgroup;
-       struct dev_cgroup *parent = cgroup_to_devcgroup(p->parent);
+       struct dev_cgroup *parent = NULL;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       if (p->parent)
+               parent = cgroup_to_devcgroup(p->parent);
+
        memset(&ex, 0, sizeof(ex));
        b = buffer;
 
@@ -391,11 +396,14 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
                        if (!may_allow_all(parent))
                                return -EPERM;
                        dev_exception_clean(devcgroup);
+                       devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
+                       if (!parent)
+                               break;
+
                        rc = dev_exceptions_copy(&devcgroup->exceptions,
                                                 &parent->exceptions);
                        if (rc)
                                return rc;
-                       devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
                        break;
                case DEVCG_DENY:
                        dev_exception_clean(devcgroup);
index 28f911c..c5454c0 100644 (file)
@@ -174,7 +174,8 @@ static void sel_netnode_insert(struct sel_netnode *node)
        if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
                struct sel_netnode *tail;
                tail = list_entry(
-                       rcu_dereference(sel_netnode_hash[idx].list.prev),
+                       rcu_dereference_protected(sel_netnode_hash[idx].list.prev,
+                                                 lockdep_is_held(&sel_netnode_lock)),
                        struct sel_netnode, list);
                list_del_rcu(&tail->list);
                kfree_rcu(tail, rcu);
index a9a2e63..e8a1d18 100644 (file)
@@ -76,6 +76,7 @@ static int snd_mixer_oss_open(struct inode *inode, struct file *file)
                snd_card_unref(card);
                return -EFAULT;
        }
+       snd_card_unref(card);
        return 0;
 }
 
index f337b66..4c1cc51 100644 (file)
@@ -2454,6 +2454,7 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
        mutex_unlock(&pcm->open_mutex);
        if (err < 0)
                goto __error;
+       snd_card_unref(pcm->card);
        return err;
 
       __error:
index 6e8872d..f9ddecf 100644 (file)
@@ -2122,7 +2122,8 @@ static int snd_pcm_playback_open(struct inode *inode, struct file *file)
        pcm = snd_lookup_minor_data(iminor(inode),
                                    SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
        err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
-       snd_card_unref(pcm->card);
+       if (pcm)
+               snd_card_unref(pcm->card);
        return err;
 }
 
@@ -2135,7 +2136,8 @@ static int snd_pcm_capture_open(struct inode *inode, struct file *file)
        pcm = snd_lookup_minor_data(iminor(inode),
                                    SNDRV_DEVICE_TYPE_PCM_CAPTURE);
        err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
-       snd_card_unref(pcm->card);
+       if (pcm)
+               snd_card_unref(pcm->card);
        return err;
 }
 
index 89780c3..70ccdab 100644 (file)
@@ -114,7 +114,7 @@ void *snd_lookup_minor_data(unsigned int minor, int type)
        mreg = snd_minors[minor];
        if (mreg && mreg->type == type) {
                private_data = mreg->private_data;
-               if (mreg->card_ptr)
+               if (private_data && mreg->card_ptr)
                        atomic_inc(&mreg->card_ptr->refcount);
        } else
                private_data = NULL;
index e1d79ee..726a49a 100644 (file)
@@ -54,7 +54,7 @@ void *snd_lookup_oss_minor_data(unsigned int minor, int type)
        mreg = snd_oss_minors[minor];
        if (mreg && mreg->type == type) {
                private_data = mreg->private_data;
-               if (mreg->card_ptr)
+               if (private_data && mreg->card_ptr)
                        atomic_inc(&mreg->card_ptr->refcount);
        } else
                private_data = NULL;
index ef68d71..e04e750 100644 (file)
@@ -426,7 +426,7 @@ static struct snd_kcontrol_new snd_ak4113_iec958_controls[] = {
 },
 {
        .iface =        SNDRV_CTL_ELEM_IFACE_PCM,
-       .name =         "IEC958 Preample Capture Default",
+       .name =         "IEC958 Preamble Capture Default",
        .access =       SNDRV_CTL_ELEM_ACCESS_READ |
                SNDRV_CTL_ELEM_ACCESS_VOLATILE,
        .info =         snd_ak4113_spdif_pinfo,
index 816e7d2..5bf4fca 100644 (file)
@@ -401,7 +401,7 @@ static struct snd_kcontrol_new snd_ak4114_iec958_controls[] = {
 },
 {
        .iface =        SNDRV_CTL_ELEM_IFACE_PCM,
-       .name =         "IEC958 Preample Capture Default",
+       .name =         "IEC958 Preamble Capture Default",
        .access =       SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
        .info =         snd_ak4114_spdif_pinfo,
        .get =          snd_ak4114_spdif_pget,
index b4b2a51..40e33c9 100644 (file)
@@ -380,7 +380,7 @@ static struct snd_kcontrol_new snd_ak4117_iec958_controls[] = {
 },
 {
        .iface =        SNDRV_CTL_ELEM_IFACE_PCM,
-       .name =         "IEC958 Preample Capture Default",
+       .name =         "IEC958 Preamble Capture Default",
        .access =       SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
        .info =         snd_ak4117_spdif_pinfo,
        .get =          snd_ak4117_spdif_pget,
index 5d0e568..7266020 100644 (file)
@@ -2581,9 +2581,14 @@ static u8 snd_es1968_tea575x_get_pins(struct snd_tea575x *tea)
        struct es1968 *chip = tea->private_data;
        unsigned long io = chip->io_port + GPIO_DATA;
        u16 val = inw(io);
+       u8 ret;
 
-       return  (val & STR_DATA) ? TEA575X_DATA : 0 |
-               (val & STR_MOST) ? TEA575X_MOST : 0;
+       ret = 0;
+       if (val & STR_DATA)
+               ret |= TEA575X_DATA;
+       if (val & STR_MOST)
+               ret |= TEA575X_MOST;
+       return ret;
 }
 
 static void snd_es1968_tea575x_set_direction(struct snd_tea575x *tea, bool output)
@@ -2655,6 +2660,8 @@ static struct ess_device_list pm_whitelist[] __devinitdata = {
        { TYPE_MAESTRO2E, 0x1179 },
        { TYPE_MAESTRO2E, 0x14c0 },     /* HP omnibook 4150 */
        { TYPE_MAESTRO2E, 0x1558 },
+       { TYPE_MAESTRO2E, 0x125d },     /* a PCI card, e.g. Terratec DMX */
+       { TYPE_MAESTRO2, 0x125d },      /* a PCI card, e.g. SF64-PCE2 */
 };
 
 static struct ess_device_list mpu_blacklist[] __devinitdata = {
index cc2e91d..c5806f8 100644 (file)
@@ -767,9 +767,14 @@ static u8 snd_fm801_tea575x_get_pins(struct snd_tea575x *tea)
        struct fm801 *chip = tea->private_data;
        unsigned short reg = inw(FM801_REG(chip, GPIO_CTRL));
        struct snd_fm801_tea575x_gpio gpio = *get_tea575x_gpio(chip);
-
-       return  (reg & FM801_GPIO_GP(gpio.data)) ? TEA575X_DATA : 0 |
-               (reg & FM801_GPIO_GP(gpio.most)) ? TEA575X_MOST : 0;
+       u8 ret;
+
+       ret = 0;
+       if (reg & FM801_GPIO_GP(gpio.data))
+               ret |= TEA575X_DATA;
+       if (reg & FM801_GPIO_GP(gpio.most))
+               ret |= TEA575X_MOST;
+       return ret;
 }
 
 static void snd_fm801_tea575x_set_direction(struct snd_tea575x *tea, bool output)
index 70d4848..d010de1 100644 (file)
@@ -95,6 +95,7 @@ int snd_hda_delete_codec_preset(struct hda_codec_preset_list *preset)
 EXPORT_SYMBOL_HDA(snd_hda_delete_codec_preset);
 
 #ifdef CONFIG_PM
+#define codec_in_pm(codec)     ((codec)->in_pm)
 static void hda_power_work(struct work_struct *work);
 static void hda_keep_power_on(struct hda_codec *codec);
 #define hda_codec_is_power_on(codec)   ((codec)->power_on)
@@ -104,6 +105,7 @@ static inline void hda_call_pm_notify(struct hda_bus *bus, bool power_up)
                bus->ops.pm_notify(bus, power_up);
 }
 #else
+#define codec_in_pm(codec)     0
 static inline void hda_keep_power_on(struct hda_codec *codec) {}
 #define hda_codec_is_power_on(codec)   1
 #define hda_call_pm_notify(bus, state) {}
@@ -228,7 +230,7 @@ static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd,
        }
        mutex_unlock(&bus->cmd_mutex);
        snd_hda_power_down(codec);
-       if (res && *res == -1 && bus->rirb_error) {
+       if (!codec_in_pm(codec) && res && *res == -1 && bus->rirb_error) {
                if (bus->response_reset) {
                        snd_printd("hda_codec: resetting BUS due to "
                                   "fatal communication error\n");
@@ -238,7 +240,7 @@ static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd,
                goto again;
        }
        /* clear reset-flag when the communication gets recovered */
-       if (!err)
+       if (!err || codec_in_pm(codec))
                bus->response_reset = 0;
        return err;
 }
@@ -3616,6 +3618,8 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq)
 {
        unsigned int state;
 
+       codec->in_pm = 1;
+
        if (codec->patch_ops.suspend)
                codec->patch_ops.suspend(codec);
        hda_cleanup_all_streams(codec);
@@ -3630,6 +3634,7 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq)
        codec->power_transition = 0;
        codec->power_jiffies = jiffies;
        spin_unlock(&codec->power_lock);
+       codec->in_pm = 0;
        return state;
 }
 
@@ -3638,6 +3643,8 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq)
  */
 static void hda_call_codec_resume(struct hda_codec *codec)
 {
+       codec->in_pm = 1;
+
        /* set as if powered on for avoiding re-entering the resume
         * in the resume / power-save sequence
         */
@@ -3656,6 +3663,8 @@ static void hda_call_codec_resume(struct hda_codec *codec)
                snd_hda_codec_resume_cache(codec);
        }
        snd_hda_jack_report_sync(codec);
+
+       codec->in_pm = 0;
        snd_hda_power_down(codec); /* flag down before returning */
 }
 #endif /* CONFIG_PM */
index 507fe8a..4f4e545 100644 (file)
@@ -869,6 +869,7 @@ struct hda_codec {
        unsigned int power_on :1;       /* current (global) power-state */
        unsigned int d3_stop_clk:1;     /* support D3 operation without BCLK */
        unsigned int pm_down_notified:1; /* PM notified to controller */
+       unsigned int in_pm:1;           /* suspend/resume being performed */
        int power_transition;   /* power-state in transition */
        int power_count;        /* current (global) power refcount */
        struct delayed_work power_work; /* delayed task for powerdown */
index 72b085a..f9d870e 100644 (file)
@@ -556,6 +556,12 @@ enum {
 #define AZX_DCAPS_ALIGN_BUFSIZE        (1 << 22)       /* buffer size alignment */
 #define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23)   /* BDLE in 4k boundary */
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)  /* Take LPIB as delay */
+#define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
+
+/* quirks for Intel PCH */
+#define AZX_DCAPS_INTEL_PCH \
+       (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
+        AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME)
 
 /* quirks for ATI SB / AMD Hudson */
 #define AZX_DCAPS_PRESET_ATI_SB \
@@ -2433,6 +2439,9 @@ static void azx_power_notify(struct hda_bus *bus, bool power_up)
 {
        struct azx *chip = bus->private_data;
 
+       if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+               return;
+
        if (power_up)
                pm_runtime_get_sync(&chip->pci->dev);
        else
@@ -2548,7 +2557,8 @@ static int azx_runtime_suspend(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip = card->private_data;
 
-       if (!power_save_controller)
+       if (!power_save_controller ||
+           !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
                return -EAGAIN;
 
        azx_stop_chip(chip);
@@ -3429,39 +3439,30 @@ static void __devexit azx_remove(struct pci_dev *pci)
 static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* CPT */
        { PCI_DEVICE(0x8086, 0x1c20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* PBG */
        { PCI_DEVICE(0x8086, 0x1d20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE},
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Panther Point */
        { PCI_DEVICE(0x8086, 0x1e20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Lynx Point */
        { PCI_DEVICE(0x8086, 0x8c20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Lynx Point-LP */
        { PCI_DEVICE(0x8086, 0x9c20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Lynx Point-LP */
        { PCI_DEVICE(0x8086, 0x9c21),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Haswell */
        { PCI_DEVICE(0x8086, 0x0c0c),
-         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
        { PCI_DEVICE(0x8086, 0x0d0c),
-         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
        /* 5 Series/3400 */
        { PCI_DEVICE(0x8086, 0x3b56),
-         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
        /* SCH */
        { PCI_DEVICE(0x8086, 0x811b),
          .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
@@ -3563,6 +3564,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* Teradici */
        { PCI_DEVICE(0x6549, 0x1200),
          .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
+       { PCI_DEVICE(0x6549, 0x2200),
+         .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
        /* Creative X-Fi (CA0110-IBG) */
        /* CTHDA chips */
        { PCI_DEVICE(0x1102, 0x0010),
index cdd43ea..1eeba73 100644 (file)
@@ -545,6 +545,7 @@ static int ad198x_build_pcms(struct hda_codec *codec)
        if (spec->multiout.dig_out_nid) {
                info++;
                codec->num_pcms++;
+               codec->spdif_status_reset = 1;
                info->name = "AD198x Digital";
                info->pcm_type = HDA_PCM_TYPE_SPDIF;
                info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ad198x_pcm_digital_playback;
index 61a7113..3bcb671 100644 (file)
@@ -101,8 +101,8 @@ enum {
 #define CS420X_VENDOR_NID      0x11
 #define CS_DIG_OUT1_PIN_NID    0x10
 #define CS_DIG_OUT2_PIN_NID    0x15
-#define CS_DMIC1_PIN_NID       0x12
-#define CS_DMIC2_PIN_NID       0x0e
+#define CS_DMIC1_PIN_NID       0x0e
+#define CS_DMIC2_PIN_NID       0x12
 
 /* coef indices */
 #define IDX_SPDIF_STAT         0x0000
@@ -466,6 +466,7 @@ static int parse_output(struct hda_codec *codec)
                memcpy(cfg->speaker_pins, cfg->line_out_pins,
                       sizeof(cfg->speaker_pins));
                cfg->line_outs = 0;
+               memset(cfg->line_out_pins, 0, sizeof(cfg->line_out_pins));
        }
 
        return 0;
@@ -1079,14 +1080,18 @@ static void init_input(struct hda_codec *codec)
                        cs_automic(codec, NULL);
 
                coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */
+               cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
+
+               coef = cs_vendor_coef_get(codec, IDX_BEEP_CFG);
                if (is_active_pin(codec, CS_DMIC2_PIN_NID))
-                       coef |= 0x0500; /* DMIC2 2 chan on, GPIO1 off */
+                       coef |= 1 << 4; /* DMIC2 2 chan on, GPIO1 off */
                if (is_active_pin(codec, CS_DMIC1_PIN_NID))
-                       coef |= 0x1800; /* DMIC1 2 chan on, GPIO0 off
+                       coef |= 1 << 3; /* DMIC1 2 chan on, GPIO0 off
                                         * No effect if SPDIF_OUT2 is
                                         * selected in IDX_SPDIF_CTL.
                                        */
-               cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
+
+               cs_vendor_coef_set(codec, IDX_BEEP_CFG, coef);
        } else {
                if (spec->mic_detect)
                        cs_automic(codec, NULL);
@@ -1107,7 +1112,7 @@ static const struct hda_verb cs_coef_init_verbs[] = {
          | 0x0400 /* Disable Coefficient Auto increment */
          )},
        /* Beep */
-       {0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG},
+       {0x11, AC_VERB_SET_COEF_INDEX, IDX_BEEP_CFG},
        {0x11, AC_VERB_SET_PROC_COEF, 0x0007}, /* Enable Beep thru DAC1/2/3 */
 
        {} /* terminator */
@@ -1728,8 +1733,7 @@ static int cs421x_mux_enum_put(struct snd_kcontrol *kcontrol,
 
 }
 
-static struct snd_kcontrol_new cs421x_capture_source = {
-
+static const struct snd_kcontrol_new cs421x_capture_source = {
        .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
        .name = "Capture Source",
        .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -1946,7 +1950,7 @@ static int cs421x_suspend(struct hda_codec *codec)
 }
 #endif
 
-static struct hda_codec_ops cs421x_patch_ops = {
+static const struct hda_codec_ops cs421x_patch_ops = {
        .build_controls = cs421x_build_controls,
        .build_pcms = cs_build_pcms,
        .init = cs421x_init,
index f7397ad..ad68d22 100644 (file)
@@ -5407,6 +5407,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
+       SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
@@ -5840,7 +5841,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        return alc_parse_auto_config(codec, alc269_ignore, ssids);
 }
 
-static void alc269_toggle_power_output(struct hda_codec *codec, int power_up)
+static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
 {
        int val = alc_read_coef_idx(codec, 0x04);
        if (power_up)
@@ -5857,10 +5858,10 @@ static void alc269_shutup(struct hda_codec *codec)
        if (spec->codec_variant != ALC269_TYPE_ALC269VB)
                return;
 
-       if ((alc_get_coef0(codec) & 0x00ff) == 0x017)
-               alc269_toggle_power_output(codec, 0);
-       if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
-               alc269_toggle_power_output(codec, 0);
+       if (spec->codec_variant == ALC269_TYPE_ALC269VB)
+               alc269vb_toggle_power_output(codec, 0);
+       if (spec->codec_variant == ALC269_TYPE_ALC269VB &&
+                       (alc_get_coef0(codec) & 0x00ff) == 0x018) {
                msleep(150);
        }
 }
@@ -5870,24 +5871,22 @@ static int alc269_resume(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
 
-       if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+       if (spec->codec_variant == ALC269_TYPE_ALC269VB)
+               alc269vb_toggle_power_output(codec, 0);
+       if (spec->codec_variant == ALC269_TYPE_ALC269VB &&
                        (alc_get_coef0(codec) & 0x00ff) == 0x018) {
-               alc269_toggle_power_output(codec, 0);
                msleep(150);
        }
 
        codec->patch_ops.init(codec);
 
-       if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+       if (spec->codec_variant == ALC269_TYPE_ALC269VB)
+               alc269vb_toggle_power_output(codec, 1);
+       if (spec->codec_variant == ALC269_TYPE_ALC269VB &&
                        (alc_get_coef0(codec) & 0x00ff) == 0x017) {
-               alc269_toggle_power_output(codec, 1);
                msleep(200);
        }
 
-       if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
-                       (alc_get_coef0(codec) & 0x00ff) == 0x018)
-               alc269_toggle_power_output(codec, 1);
-
        snd_hda_codec_resume_amp(codec);
        snd_hda_codec_resume_cache(codec);
        hda_call_check_power_status(codec, 0x01);
@@ -7066,6 +7065,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
        { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
        { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
        { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
+       { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
        { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
          .patch = patch_alc861 },
        { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
@@ -7079,6 +7079,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
          .patch = patch_alc662 },
        { .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 },
        { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 },
+       { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 },
        { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
        { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
        { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
@@ -7096,6 +7097,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
        { .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc882 },
        { .id = 0x10ec0892, .name = "ALC892", .patch = patch_alc662 },
        { .id = 0x10ec0899, .name = "ALC898", .patch = patch_alc882 },
+       { .id = 0x10ec0900, .name = "ALC1150", .patch = patch_alc882 },
        {} /* terminator */
 };
 
index 72a2f60..019e1a0 100644 (file)
@@ -1809,11 +1809,11 @@ static int via_auto_fill_dac_nids(struct hda_codec *codec)
 {
        struct via_spec *spec = codec->spec;
        const struct auto_pin_cfg *cfg = &spec->autocfg;
-       int i, dac_num;
+       int i;
        hda_nid_t nid;
 
+       spec->multiout.num_dacs = 0;
        spec->multiout.dac_nids = spec->private_dac_nids;
-       dac_num = 0;
        for (i = 0; i < cfg->line_outs; i++) {
                hda_nid_t dac = 0;
                nid = cfg->line_out_pins[i];
@@ -1824,16 +1824,13 @@ static int via_auto_fill_dac_nids(struct hda_codec *codec)
                if (!i && parse_output_path(codec, nid, dac, 1,
                                            &spec->out_mix_path))
                        dac = spec->out_mix_path.path[0];
-               if (dac) {
-                       spec->private_dac_nids[i] = dac;
-                       dac_num++;
-               }
+               if (dac)
+                       spec->private_dac_nids[spec->multiout.num_dacs++] = dac;
        }
        if (!spec->out_path[0].depth && spec->out_mix_path.depth) {
                spec->out_path[0] = spec->out_mix_path;
                spec->out_mix_path.depth = 0;
        }
-       spec->multiout.num_dacs = dac_num;
        return 0;
 }
 
@@ -3628,6 +3625,7 @@ static void set_widgets_power_state_vt2002P(struct hda_codec *codec)
  */
 enum {
        VIA_FIXUP_INTMIC_BOOST,
+       VIA_FIXUP_ASUS_G75,
 };
 
 static void via_fixup_intmic_boost(struct hda_codec *codec,
@@ -3642,13 +3640,35 @@ static const struct hda_fixup via_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = via_fixup_intmic_boost,
        },
+       [VIA_FIXUP_ASUS_G75] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       /* set 0x24 and 0x33 as speakers */
+                       { 0x24, 0x991301f0 },
+                       { 0x33, 0x991301f1 }, /* subwoofer */
+                       { }
+               }
+       },
 };
 
 static const struct snd_pci_quirk vt2002p_fixups[] = {
+       SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
        SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
        {}
 };
 
+/* NIDs 0x24 and 0x33 on VT1802 have connections to non-existing NID 0x3e
+ * Replace this with mixer NID 0x1c
+ */
+static void fix_vt1802_connections(struct hda_codec *codec)
+{
+       static hda_nid_t conn_24[] = { 0x14, 0x1c };
+       static hda_nid_t conn_33[] = { 0x1c };
+
+       snd_hda_override_conn_list(codec, 0x24, ARRAY_SIZE(conn_24), conn_24);
+       snd_hda_override_conn_list(codec, 0x33, ARRAY_SIZE(conn_33), conn_33);
+}
+
 /* patch for vt2002P */
 static int patch_vt2002P(struct hda_codec *codec)
 {
@@ -3663,6 +3683,8 @@ static int patch_vt2002P(struct hda_codec *codec)
        spec->aa_mix_nid = 0x21;
        override_mic_boost(codec, 0x2b, 0, 3, 40);
        override_mic_boost(codec, 0x29, 0, 3, 40);
+       if (spec->codec_type == VT1802)
+               fix_vt1802_connections(codec);
        add_secret_dac_path(codec);
 
        snd_hda_pick_fixup(codec, NULL, vt2002p_fixups, via_fixups);
index f1cd1e3..748e36c 100644 (file)
@@ -3979,7 +3979,8 @@ static int snd_hdspm_get_sync_check(struct snd_kcontrol *kcontrol,
                case 8: /* SYNC IN */
                        val = hdspm_sync_in_sync_check(hdspm); break;
                default:
-                       val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1);
+                       val = hdspm_s1_sync_check(hdspm,
+                                       kcontrol->private_value-1);
                }
                break;
 
@@ -4899,7 +4900,7 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry,
                insel = "Coaxial";
                break;
        default:
-               insel = "Unkown";
+               insel = "Unknown";
        }
 
        snd_iprintf(buffer,
index c03b65a..054967d 100644 (file)
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(arizona_out_ev);
 static unsigned int arizona_sysclk_48k_rates[] = {
        6144000,
        12288000,
-       22579200,
+       24576000,
        49152000,
        73728000,
        98304000,
@@ -278,7 +278,7 @@ static unsigned int arizona_sysclk_48k_rates[] = {
 static unsigned int arizona_sysclk_44k1_rates[] = {
        5644800,
        11289600,
-       24576000,
+       22579200,
        45158400,
        67737600,
        90316800,
index f994af3..e3f0a7f 100644 (file)
@@ -485,7 +485,7 @@ static int cs4271_probe(struct snd_soc_codec *codec)
                gpio_nreset = cs4271plat->gpio_nreset;
 
        if (gpio_nreset >= 0)
-               if (gpio_request(gpio_nreset, "CS4271 Reset"))
+               if (devm_gpio_request(codec->dev, gpio_nreset, "CS4271 Reset"))
                        gpio_nreset = -EINVAL;
        if (gpio_nreset >= 0) {
                /* Reset codec */
@@ -535,15 +535,10 @@ static int cs4271_probe(struct snd_soc_codec *codec)
 static int cs4271_remove(struct snd_soc_codec *codec)
 {
        struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
-       int gpio_nreset;
 
-       gpio_nreset = cs4271->gpio_nreset;
-
-       if (gpio_is_valid(gpio_nreset)) {
+       if (gpio_is_valid(cs4271->gpio_nreset))
                /* Set codec to the reset state */
-               gpio_set_value(gpio_nreset, 0);
-               gpio_free(gpio_nreset);
-       }
+               gpio_set_value(cs4271->gpio_nreset, 0);
 
        return 0;
 };
index 6159929..97a8105 100644 (file)
@@ -763,7 +763,7 @@ static int cs42l52_set_sysclk(struct snd_soc_dai *codec_dai,
        if ((freq >= CS42L52_MIN_CLK) && (freq <= CS42L52_MAX_CLK)) {
                cs42l52->sysclk = freq;
        } else {
-               dev_err(codec->dev, "Invalid freq paramter\n");
+               dev_err(codec->dev, "Invalid freq parameter\n");
                return -EINVAL;
        }
        return 0;
@@ -773,7 +773,6 @@ static int cs42l52_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 {
        struct snd_soc_codec *codec = codec_dai->codec;
        struct cs42l52_private *cs42l52 = snd_soc_codec_get_drvdata(codec);
-       int ret = 0;
        u8 iface = 0;
 
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -822,7 +821,7 @@ static int cs42l52_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        case SND_SOC_DAIFMT_NB_IF:
                break;
        default:
-               ret = -EINVAL;
+               return -EINVAL;
        }
        cs42l52->config.format = iface;
        snd_soc_write(codec, CS42L52_IFACE_CTL1, cs42l52->config.format);
index 1722b58..7394e73 100644 (file)
@@ -42,6 +42,556 @@ static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
 static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
 static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
 
+static const struct reg_default wm5102_sysclk_reva_patch[] = {
+       { 0x3000, 0x2225 },
+       { 0x3001, 0x3a03 },
+       { 0x3002, 0x0225 },
+       { 0x3003, 0x0801 },
+       { 0x3004, 0x6249 },
+       { 0x3005, 0x0c04 },
+       { 0x3006, 0x0225 },
+       { 0x3007, 0x5901 },
+       { 0x3008, 0xe249 },
+       { 0x3009, 0x030d },
+       { 0x300a, 0x0249 },
+       { 0x300b, 0x2c01 },
+       { 0x300c, 0xe249 },
+       { 0x300d, 0x4342 },
+       { 0x300e, 0xe249 },
+       { 0x300f, 0x73c0 },
+       { 0x3010, 0x4249 },
+       { 0x3011, 0x0c00 },
+       { 0x3012, 0x0225 },
+       { 0x3013, 0x1f01 },
+       { 0x3014, 0x0225 },
+       { 0x3015, 0x1e01 },
+       { 0x3016, 0x0225 },
+       { 0x3017, 0xfa00 },
+       { 0x3018, 0x0000 },
+       { 0x3019, 0xf000 },
+       { 0x301a, 0x0000 },
+       { 0x301b, 0xf000 },
+       { 0x301c, 0x0000 },
+       { 0x301d, 0xf000 },
+       { 0x301e, 0x0000 },
+       { 0x301f, 0xf000 },
+       { 0x3020, 0x0000 },
+       { 0x3021, 0xf000 },
+       { 0x3022, 0x0000 },
+       { 0x3023, 0xf000 },
+       { 0x3024, 0x0000 },
+       { 0x3025, 0xf000 },
+       { 0x3026, 0x0000 },
+       { 0x3027, 0xf000 },
+       { 0x3028, 0x0000 },
+       { 0x3029, 0xf000 },
+       { 0x302a, 0x0000 },
+       { 0x302b, 0xf000 },
+       { 0x302c, 0x0000 },
+       { 0x302d, 0xf000 },
+       { 0x302e, 0x0000 },
+       { 0x302f, 0xf000 },
+       { 0x3030, 0x0225 },
+       { 0x3031, 0x1a01 },
+       { 0x3032, 0x0225 },
+       { 0x3033, 0x1e00 },
+       { 0x3034, 0x0225 },
+       { 0x3035, 0x1f00 },
+       { 0x3036, 0x6225 },
+       { 0x3037, 0xf800 },
+       { 0x3038, 0x0000 },
+       { 0x3039, 0xf000 },
+       { 0x303a, 0x0000 },
+       { 0x303b, 0xf000 },
+       { 0x303c, 0x0000 },
+       { 0x303d, 0xf000 },
+       { 0x303e, 0x0000 },
+       { 0x303f, 0xf000 },
+       { 0x3040, 0x2226 },
+       { 0x3041, 0x3a03 },
+       { 0x3042, 0x0226 },
+       { 0x3043, 0x0801 },
+       { 0x3044, 0x6249 },
+       { 0x3045, 0x0c06 },
+       { 0x3046, 0x0226 },
+       { 0x3047, 0x5901 },
+       { 0x3048, 0xe249 },
+       { 0x3049, 0x030d },
+       { 0x304a, 0x0249 },
+       { 0x304b, 0x2c01 },
+       { 0x304c, 0xe249 },
+       { 0x304d, 0x4342 },
+       { 0x304e, 0xe249 },
+       { 0x304f, 0x73c0 },
+       { 0x3050, 0x4249 },
+       { 0x3051, 0x0c00 },
+       { 0x3052, 0x0226 },
+       { 0x3053, 0x1f01 },
+       { 0x3054, 0x0226 },
+       { 0x3055, 0x1e01 },
+       { 0x3056, 0x0226 },
+       { 0x3057, 0xfa00 },
+       { 0x3058, 0x0000 },
+       { 0x3059, 0xf000 },
+       { 0x305a, 0x0000 },
+       { 0x305b, 0xf000 },
+       { 0x305c, 0x0000 },
+       { 0x305d, 0xf000 },
+       { 0x305e, 0x0000 },
+       { 0x305f, 0xf000 },
+       { 0x3060, 0x0000 },
+       { 0x3061, 0xf000 },
+       { 0x3062, 0x0000 },
+       { 0x3063, 0xf000 },
+       { 0x3064, 0x0000 },
+       { 0x3065, 0xf000 },
+       { 0x3066, 0x0000 },
+       { 0x3067, 0xf000 },
+       { 0x3068, 0x0000 },
+       { 0x3069, 0xf000 },
+       { 0x306a, 0x0000 },
+       { 0x306b, 0xf000 },
+       { 0x306c, 0x0000 },
+       { 0x306d, 0xf000 },
+       { 0x306e, 0x0000 },
+       { 0x306f, 0xf000 },
+       { 0x3070, 0x0226 },
+       { 0x3071, 0x1a01 },
+       { 0x3072, 0x0226 },
+       { 0x3073, 0x1e00 },
+       { 0x3074, 0x0226 },
+       { 0x3075, 0x1f00 },
+       { 0x3076, 0x6226 },
+       { 0x3077, 0xf800 },
+       { 0x3078, 0x0000 },
+       { 0x3079, 0xf000 },
+       { 0x307a, 0x0000 },
+       { 0x307b, 0xf000 },
+       { 0x307c, 0x0000 },
+       { 0x307d, 0xf000 },
+       { 0x307e, 0x0000 },
+       { 0x307f, 0xf000 },
+       { 0x3080, 0x2227 },
+       { 0x3081, 0x3a03 },
+       { 0x3082, 0x0227 },
+       { 0x3083, 0x0801 },
+       { 0x3084, 0x6255 },
+       { 0x3085, 0x0c04 },
+       { 0x3086, 0x0227 },
+       { 0x3087, 0x5901 },
+       { 0x3088, 0xe255 },
+       { 0x3089, 0x030d },
+       { 0x308a, 0x0255 },
+       { 0x308b, 0x2c01 },
+       { 0x308c, 0xe255 },
+       { 0x308d, 0x4342 },
+       { 0x308e, 0xe255 },
+       { 0x308f, 0x73c0 },
+       { 0x3090, 0x4255 },
+       { 0x3091, 0x0c00 },
+       { 0x3092, 0x0227 },
+       { 0x3093, 0x1f01 },
+       { 0x3094, 0x0227 },
+       { 0x3095, 0x1e01 },
+       { 0x3096, 0x0227 },
+       { 0x3097, 0xfa00 },
+       { 0x3098, 0x0000 },
+       { 0x3099, 0xf000 },
+       { 0x309a, 0x0000 },
+       { 0x309b, 0xf000 },
+       { 0x309c, 0x0000 },
+       { 0x309d, 0xf000 },
+       { 0x309e, 0x0000 },
+       { 0x309f, 0xf000 },
+       { 0x30a0, 0x0000 },
+       { 0x30a1, 0xf000 },
+       { 0x30a2, 0x0000 },
+       { 0x30a3, 0xf000 },
+       { 0x30a4, 0x0000 },
+       { 0x30a5, 0xf000 },
+       { 0x30a6, 0x0000 },
+       { 0x30a7, 0xf000 },
+       { 0x30a8, 0x0000 },
+       { 0x30a9, 0xf000 },
+       { 0x30aa, 0x0000 },
+       { 0x30ab, 0xf000 },
+       { 0x30ac, 0x0000 },
+       { 0x30ad, 0xf000 },
+       { 0x30ae, 0x0000 },
+       { 0x30af, 0xf000 },
+       { 0x30b0, 0x0227 },
+       { 0x30b1, 0x1a01 },
+       { 0x30b2, 0x0227 },
+       { 0x30b3, 0x1e00 },
+       { 0x30b4, 0x0227 },
+       { 0x30b5, 0x1f00 },
+       { 0x30b6, 0x6227 },
+       { 0x30b7, 0xf800 },
+       { 0x30b8, 0x0000 },
+       { 0x30b9, 0xf000 },
+       { 0x30ba, 0x0000 },
+       { 0x30bb, 0xf000 },
+       { 0x30bc, 0x0000 },
+       { 0x30bd, 0xf000 },
+       { 0x30be, 0x0000 },
+       { 0x30bf, 0xf000 },
+       { 0x30c0, 0x2228 },
+       { 0x30c1, 0x3a03 },
+       { 0x30c2, 0x0228 },
+       { 0x30c3, 0x0801 },
+       { 0x30c4, 0x6255 },
+       { 0x30c5, 0x0c06 },
+       { 0x30c6, 0x0228 },
+       { 0x30c7, 0x5901 },
+       { 0x30c8, 0xe255 },
+       { 0x30c9, 0x030d },
+       { 0x30ca, 0x0255 },
+       { 0x30cb, 0x2c01 },
+       { 0x30cc, 0xe255 },
+       { 0x30cd, 0x4342 },
+       { 0x30ce, 0xe255 },
+       { 0x30cf, 0x73c0 },
+       { 0x30d0, 0x4255 },
+       { 0x30d1, 0x0c00 },
+       { 0x30d2, 0x0228 },
+       { 0x30d3, 0x1f01 },
+       { 0x30d4, 0x0228 },
+       { 0x30d5, 0x1e01 },
+       { 0x30d6, 0x0228 },
+       { 0x30d7, 0xfa00 },
+       { 0x30d8, 0x0000 },
+       { 0x30d9, 0xf000 },
+       { 0x30da, 0x0000 },
+       { 0x30db, 0xf000 },
+       { 0x30dc, 0x0000 },
+       { 0x30dd, 0xf000 },
+       { 0x30de, 0x0000 },
+       { 0x30df, 0xf000 },
+       { 0x30e0, 0x0000 },
+       { 0x30e1, 0xf000 },
+       { 0x30e2, 0x0000 },
+       { 0x30e3, 0xf000 },
+       { 0x30e4, 0x0000 },
+       { 0x30e5, 0xf000 },
+       { 0x30e6, 0x0000 },
+       { 0x30e7, 0xf000 },
+       { 0x30e8, 0x0000 },
+       { 0x30e9, 0xf000 },
+       { 0x30ea, 0x0000 },
+       { 0x30eb, 0xf000 },
+       { 0x30ec, 0x0000 },
+       { 0x30ed, 0xf000 },
+       { 0x30ee, 0x0000 },
+       { 0x30ef, 0xf000 },
+       { 0x30f0, 0x0228 },
+       { 0x30f1, 0x1a01 },
+       { 0x30f2, 0x0228 },
+       { 0x30f3, 0x1e00 },
+       { 0x30f4, 0x0228 },
+       { 0x30f5, 0x1f00 },
+       { 0x30f6, 0x6228 },
+       { 0x30f7, 0xf800 },
+       { 0x30f8, 0x0000 },
+       { 0x30f9, 0xf000 },
+       { 0x30fa, 0x0000 },
+       { 0x30fb, 0xf000 },
+       { 0x30fc, 0x0000 },
+       { 0x30fd, 0xf000 },
+       { 0x30fe, 0x0000 },
+       { 0x30ff, 0xf000 },
+       { 0x3100, 0x222b },
+       { 0x3101, 0x3a03 },
+       { 0x3102, 0x222b },
+       { 0x3103, 0x5803 },
+       { 0x3104, 0xe26f },
+       { 0x3105, 0x030d },
+       { 0x3106, 0x626f },
+       { 0x3107, 0x2c01 },
+       { 0x3108, 0xe26f },
+       { 0x3109, 0x4342 },
+       { 0x310a, 0xe26f },
+       { 0x310b, 0x73c0 },
+       { 0x310c, 0x026f },
+       { 0x310d, 0x0c00 },
+       { 0x310e, 0x022b },
+       { 0x310f, 0x1f01 },
+       { 0x3110, 0x022b },
+       { 0x3111, 0x1e01 },
+       { 0x3112, 0x022b },
+       { 0x3113, 0xfa00 },
+       { 0x3114, 0x0000 },
+       { 0x3115, 0xf000 },
+       { 0x3116, 0x0000 },
+       { 0x3117, 0xf000 },
+       { 0x3118, 0x0000 },
+       { 0x3119, 0xf000 },
+       { 0x311a, 0x0000 },
+       { 0x311b, 0xf000 },
+       { 0x311c, 0x0000 },
+       { 0x311d, 0xf000 },
+       { 0x311e, 0x0000 },
+       { 0x311f, 0xf000 },
+       { 0x3120, 0x022b },
+       { 0x3121, 0x0a01 },
+       { 0x3122, 0x022b },
+       { 0x3123, 0x1e00 },
+       { 0x3124, 0x022b },
+       { 0x3125, 0x1f00 },
+       { 0x3126, 0x622b },
+       { 0x3127, 0xf800 },
+       { 0x3128, 0x0000 },
+       { 0x3129, 0xf000 },
+       { 0x312a, 0x0000 },
+       { 0x312b, 0xf000 },
+       { 0x312c, 0x0000 },
+       { 0x312d, 0xf000 },
+       { 0x312e, 0x0000 },
+       { 0x312f, 0xf000 },
+       { 0x3130, 0x0000 },
+       { 0x3131, 0xf000 },
+       { 0x3132, 0x0000 },
+       { 0x3133, 0xf000 },
+       { 0x3134, 0x0000 },
+       { 0x3135, 0xf000 },
+       { 0x3136, 0x0000 },
+       { 0x3137, 0xf000 },
+       { 0x3138, 0x0000 },
+       { 0x3139, 0xf000 },
+       { 0x313a, 0x0000 },
+       { 0x313b, 0xf000 },
+       { 0x313c, 0x0000 },
+       { 0x313d, 0xf000 },
+       { 0x313e, 0x0000 },
+       { 0x313f, 0xf000 },
+       { 0x3140, 0x0000 },
+       { 0x3141, 0xf000 },
+       { 0x3142, 0x0000 },
+       { 0x3143, 0xf000 },
+       { 0x3144, 0x0000 },
+       { 0x3145, 0xf000 },
+       { 0x3146, 0x0000 },
+       { 0x3147, 0xf000 },
+       { 0x3148, 0x0000 },
+       { 0x3149, 0xf000 },
+       { 0x314a, 0x0000 },
+       { 0x314b, 0xf000 },
+       { 0x314c, 0x0000 },
+       { 0x314d, 0xf000 },
+       { 0x314e, 0x0000 },
+       { 0x314f, 0xf000 },
+       { 0x3150, 0x0000 },
+       { 0x3151, 0xf000 },
+       { 0x3152, 0x0000 },
+       { 0x3153, 0xf000 },
+       { 0x3154, 0x0000 },
+       { 0x3155, 0xf000 },
+       { 0x3156, 0x0000 },
+       { 0x3157, 0xf000 },
+       { 0x3158, 0x0000 },
+       { 0x3159, 0xf000 },
+       { 0x315a, 0x0000 },
+       { 0x315b, 0xf000 },
+       { 0x315c, 0x0000 },
+       { 0x315d, 0xf000 },
+       { 0x315e, 0x0000 },
+       { 0x315f, 0xf000 },
+       { 0x3160, 0x0000 },
+       { 0x3161, 0xf000 },
+       { 0x3162, 0x0000 },
+       { 0x3163, 0xf000 },
+       { 0x3164, 0x0000 },
+       { 0x3165, 0xf000 },
+       { 0x3166, 0x0000 },
+       { 0x3167, 0xf000 },
+       { 0x3168, 0x0000 },
+       { 0x3169, 0xf000 },
+       { 0x316a, 0x0000 },
+       { 0x316b, 0xf000 },
+       { 0x316c, 0x0000 },
+       { 0x316d, 0xf000 },
+       { 0x316e, 0x0000 },
+       { 0x316f, 0xf000 },
+       { 0x3170, 0x0000 },
+       { 0x3171, 0xf000 },
+       { 0x3172, 0x0000 },
+       { 0x3173, 0xf000 },
+       { 0x3174, 0x0000 },
+       { 0x3175, 0xf000 },
+       { 0x3176, 0x0000 },
+       { 0x3177, 0xf000 },
+       { 0x3178, 0x0000 },
+       { 0x3179, 0xf000 },
+       { 0x317a, 0x0000 },
+       { 0x317b, 0xf000 },
+       { 0x317c, 0x0000 },
+       { 0x317d, 0xf000 },
+       { 0x317e, 0x0000 },
+       { 0x317f, 0xf000 },
+       { 0x3180, 0x2001 },
+       { 0x3181, 0xf101 },
+       { 0x3182, 0x0000 },
+       { 0x3183, 0xf000 },
+       { 0x3184, 0x0000 },
+       { 0x3185, 0xf000 },
+       { 0x3186, 0x0000 },
+       { 0x3187, 0xf000 },
+       { 0x3188, 0x0000 },
+       { 0x3189, 0xf000 },
+       { 0x318a, 0x0000 },
+       { 0x318b, 0xf000 },
+       { 0x318c, 0x0000 },
+       { 0x318d, 0xf000 },
+       { 0x318e, 0x0000 },
+       { 0x318f, 0xf000 },
+       { 0x3190, 0x0000 },
+       { 0x3191, 0xf000 },
+       { 0x3192, 0x0000 },
+       { 0x3193, 0xf000 },
+       { 0x3194, 0x0000 },
+       { 0x3195, 0xf000 },
+       { 0x3196, 0x0000 },
+       { 0x3197, 0xf000 },
+       { 0x3198, 0x0000 },
+       { 0x3199, 0xf000 },
+       { 0x319a, 0x0000 },
+       { 0x319b, 0xf000 },
+       { 0x319c, 0x0000 },
+       { 0x319d, 0xf000 },
+       { 0x319e, 0x0000 },
+       { 0x319f, 0xf000 },
+       { 0x31a0, 0x0000 },
+       { 0x31a1, 0xf000 },
+       { 0x31a2, 0x0000 },
+       { 0x31a3, 0xf000 },
+       { 0x31a4, 0x0000 },
+       { 0x31a5, 0xf000 },
+       { 0x31a6, 0x0000 },
+       { 0x31a7, 0xf000 },
+       { 0x31a8, 0x0000 },
+       { 0x31a9, 0xf000 },
+       { 0x31aa, 0x0000 },
+       { 0x31ab, 0xf000 },
+       { 0x31ac, 0x0000 },
+       { 0x31ad, 0xf000 },
+       { 0x31ae, 0x0000 },
+       { 0x31af, 0xf000 },
+       { 0x31b0, 0x0000 },
+       { 0x31b1, 0xf000 },
+       { 0x31b2, 0x0000 },
+       { 0x31b3, 0xf000 },
+       { 0x31b4, 0x0000 },
+       { 0x31b5, 0xf000 },
+       { 0x31b6, 0x0000 },
+       { 0x31b7, 0xf000 },
+       { 0x31b8, 0x0000 },
+       { 0x31b9, 0xf000 },
+       { 0x31ba, 0x0000 },
+       { 0x31bb, 0xf000 },
+       { 0x31bc, 0x0000 },
+       { 0x31bd, 0xf000 },
+       { 0x31be, 0x0000 },
+       { 0x31bf, 0xf000 },
+       { 0x31c0, 0x0000 },
+       { 0x31c1, 0xf000 },
+       { 0x31c2, 0x0000 },
+       { 0x31c3, 0xf000 },
+       { 0x31c4, 0x0000 },
+       { 0x31c5, 0xf000 },
+       { 0x31c6, 0x0000 },
+       { 0x31c7, 0xf000 },
+       { 0x31c8, 0x0000 },
+       { 0x31c9, 0xf000 },
+       { 0x31ca, 0x0000 },
+       { 0x31cb, 0xf000 },
+       { 0x31cc, 0x0000 },
+       { 0x31cd, 0xf000 },
+       { 0x31ce, 0x0000 },
+       { 0x31cf, 0xf000 },
+       { 0x31d0, 0x0000 },
+       { 0x31d1, 0xf000 },
+       { 0x31d2, 0x0000 },
+       { 0x31d3, 0xf000 },
+       { 0x31d4, 0x0000 },
+       { 0x31d5, 0xf000 },
+       { 0x31d6, 0x0000 },
+       { 0x31d7, 0xf000 },
+       { 0x31d8, 0x0000 },
+       { 0x31d9, 0xf000 },
+       { 0x31da, 0x0000 },
+       { 0x31db, 0xf000 },
+       { 0x31dc, 0x0000 },
+       { 0x31dd, 0xf000 },
+       { 0x31de, 0x0000 },
+       { 0x31df, 0xf000 },
+       { 0x31e0, 0x0000 },
+       { 0x31e1, 0xf000 },
+       { 0x31e2, 0x0000 },
+       { 0x31e3, 0xf000 },
+       { 0x31e4, 0x0000 },
+       { 0x31e5, 0xf000 },
+       { 0x31e6, 0x0000 },
+       { 0x31e7, 0xf000 },
+       { 0x31e8, 0x0000 },
+       { 0x31e9, 0xf000 },
+       { 0x31ea, 0x0000 },
+       { 0x31eb, 0xf000 },
+       { 0x31ec, 0x0000 },
+       { 0x31ed, 0xf000 },
+       { 0x31ee, 0x0000 },
+       { 0x31ef, 0xf000 },
+       { 0x31f0, 0x0000 },
+       { 0x31f1, 0xf000 },
+       { 0x31f2, 0x0000 },
+       { 0x31f3, 0xf000 },
+       { 0x31f4, 0x0000 },
+       { 0x31f5, 0xf000 },
+       { 0x31f6, 0x0000 },
+       { 0x31f7, 0xf000 },
+       { 0x31f8, 0x0000 },
+       { 0x31f9, 0xf000 },
+       { 0x31fa, 0x0000 },
+       { 0x31fb, 0xf000 },
+       { 0x31fc, 0x0000 },
+       { 0x31fd, 0xf000 },
+       { 0x31fe, 0x0000 },
+       { 0x31ff, 0xf000 },
+       { 0x024d, 0xff50 },
+       { 0x0252, 0xff50 },
+       { 0x0259, 0x0112 },
+       { 0x025e, 0x0112 },
+};
+
+static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
+                           struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct arizona *arizona = dev_get_drvdata(codec->dev);
+       struct regmap *regmap = codec->control_data;
+       const struct reg_default *patch = NULL;
+       int i, patch_size;
+
+       switch (arizona->rev) {
+       case 0:
+               patch = wm5102_sysclk_reva_patch;
+               patch_size = ARRAY_SIZE(wm5102_sysclk_reva_patch);
+               break;
+       }
+
+       switch (event) {
+       case SND_SOC_DAPM_POST_PMU:
+               if (patch)
+                       for (i = 0; i < patch_size; i++)
+                               regmap_write(regmap, patch[i].reg,
+                                            patch[i].def);
+               break;
+
+       default:
+               break;
+       }
+
+       return 0;
+}
+
 static const struct snd_kcontrol_new wm5102_snd_controls[] = {
 SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL,
           ARIZONA_IN1_OSR_SHIFT, 1, 0),
@@ -297,7 +847,7 @@ static const struct snd_kcontrol_new wm5102_aec_loopback_mux =
 
 static const struct snd_soc_dapm_widget wm5102_dapm_widgets[] = {
 SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
-                   0, NULL, 0),
+                   0, wm5102_sysclk_ev, SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
                    ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
index 5421fd9..4c0a8e4 100644 (file)
@@ -782,7 +782,7 @@ static int wm8978_hw_params(struct snd_pcm_substream *substream,
                wm8978->mclk_idx = -1;
                f_sel = wm8978->f_mclk;
        } else {
-               if (!wm8978->f_pllout) {
+               if (!wm8978->f_opclk) {
                        /* We only enter here, if OPCLK is not used */
                        int ret = wm8978_configure_pll(codec);
                        if (ret < 0)
index 3fddc7a..b2b2b37 100644 (file)
@@ -3722,7 +3722,7 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
        } while (count--);
 
        if (count == 0)
-               dev_warn(codec->dev, "No impedence range reported for jack\n");
+               dev_warn(codec->dev, "No impedance range reported for jack\n");
 
 #ifndef CONFIG_SND_SOC_WM8994_MODULE
        trace_snd_soc_jack_irq(dev_name(codec->dev));
index b9f1659..2ba0814 100644 (file)
@@ -71,7 +71,6 @@ static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
                printk(KERN_WARNING "%s: got err interrupt 0x%lx\n",
                                __func__, cause);
                writel(cause, priv->io + KIRKWOOD_ERR_CAUSE);
-               return IRQ_HANDLED;
        }
 
        /* we've enabled only bytes interrupts ... */
@@ -178,7 +177,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
        }
 
        dram = mv_mbus_dram_info();
-       addr = virt_to_phys(substream->dma_buffer.area);
+       addr = substream->dma_buffer.addr;
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                prdata->play_stream = substream;
                kirkwood_dma_conf_mbus_windows(priv->io,
index 542538d..1d5db48 100644 (file)
@@ -95,7 +95,7 @@ static inline void kirkwood_set_dco(void __iomem *io, unsigned long rate)
        do {
                cpu_relax();
                value = readl(io + KIRKWOOD_DCO_SPCR_STATUS);
-               value &= KIRKWOOD_DCO_SPCR_STATUS;
+               value &= KIRKWOOD_DCO_SPCR_STATUS_DCO_LOCK;
        } while (value == 0);
 }
 
@@ -180,67 +180,72 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
                                int cmd, struct snd_soc_dai *dai)
 {
        struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai);
-       unsigned long value;
-
-       /*
-        * specs says KIRKWOOD_PLAYCTL must be read 2 times before
-        * changing it. So read 1 time here and 1 later.
-        */
-       value = readl(priv->io + KIRKWOOD_PLAYCTL);
+       uint32_t ctl, value;
+
+       ctl = readl(priv->io + KIRKWOOD_PLAYCTL);
+       if (ctl & KIRKWOOD_PLAYCTL_PAUSE) {
+               unsigned timeout = 5000;
+               /*
+                * The Armada510 spec says that if we enter pause mode, the
+                * busy bit must be read back as clear _twice_.  Make sure
+                * we respect that otherwise we get DMA underruns.
+                */
+               do {
+                       value = ctl;
+                       ctl = readl(priv->io + KIRKWOOD_PLAYCTL);
+                       if (!((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY))
+                               break;
+                       udelay(1);
+               } while (timeout--);
+
+               if ((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY)
+                       dev_notice(dai->dev, "timed out waiting for busy to deassert: %08x\n",
+                                  ctl);
+       }
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
-               /* stop audio, enable interrupts */
-               value = readl(priv->io + KIRKWOOD_PLAYCTL);
-               value |= KIRKWOOD_PLAYCTL_PAUSE;
-               writel(value, priv->io + KIRKWOOD_PLAYCTL);
-
                value = readl(priv->io + KIRKWOOD_INT_MASK);
                value |= KIRKWOOD_INT_CAUSE_PLAY_BYTES;
                writel(value, priv->io + KIRKWOOD_INT_MASK);
 
                /* configure audio & enable i2s playback */
-               value = readl(priv->io + KIRKWOOD_PLAYCTL);
-               value &= ~KIRKWOOD_PLAYCTL_BURST_MASK;
-               value &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE
+               ctl &= ~KIRKWOOD_PLAYCTL_BURST_MASK;
+               ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE
                                | KIRKWOOD_PLAYCTL_SPDIF_EN);
 
                if (priv->burst == 32)
-                       value |= KIRKWOOD_PLAYCTL_BURST_32;
+                       ctl |= KIRKWOOD_PLAYCTL_BURST_32;
                else
-                       value |= KIRKWOOD_PLAYCTL_BURST_128;
-               value |= KIRKWOOD_PLAYCTL_I2S_EN;
-               writel(value, priv->io + KIRKWOOD_PLAYCTL);
+                       ctl |= KIRKWOOD_PLAYCTL_BURST_128;
+               ctl |= KIRKWOOD_PLAYCTL_I2S_EN;
+               writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
                break;
 
        case SNDRV_PCM_TRIGGER_STOP:
                /* stop audio, disable interrupts */
-               value = readl(priv->io + KIRKWOOD_PLAYCTL);
-               value |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
-               writel(value, priv->io + KIRKWOOD_PLAYCTL);
+               ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
+               writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
 
                value = readl(priv->io + KIRKWOOD_INT_MASK);
                value &= ~KIRKWOOD_INT_CAUSE_PLAY_BYTES;
                writel(value, priv->io + KIRKWOOD_INT_MASK);
 
                /* disable all playbacks */
-               value = readl(priv->io + KIRKWOOD_PLAYCTL);
-               value &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN);
-               writel(value, priv->io + KIRKWOOD_PLAYCTL);
+               ctl &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN);
+               writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
                break;
 
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
        case SNDRV_PCM_TRIGGER_SUSPEND:
-               value = readl(priv->io + KIRKWOOD_PLAYCTL);
-               value |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
-               writel(value, priv->io + KIRKWOOD_PLAYCTL);
+               ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
+               writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
                break;
 
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               value = readl(priv->io + KIRKWOOD_PLAYCTL);
-               value &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE);
-               writel(value, priv->io + KIRKWOOD_PLAYCTL);
+               ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE);
+               writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
                break;
 
        default:
@@ -260,11 +265,6 @@ static int kirkwood_i2s_rec_trigger(struct snd_pcm_substream *substream,
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
-               /* stop audio, enable interrupts */
-               value = readl(priv->io + KIRKWOOD_RECCTL);
-               value |= KIRKWOOD_RECCTL_PAUSE;
-               writel(value, priv->io + KIRKWOOD_RECCTL);
-
                value = readl(priv->io + KIRKWOOD_INT_MASK);
                value |= KIRKWOOD_INT_CAUSE_REC_BYTES;
                writel(value, priv->io + KIRKWOOD_INT_MASK);
index aa037b2..c294fbb 100644 (file)
@@ -523,16 +523,24 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
 
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                        /*
-                        * write a data to saif data register to trigger
-                        * the transfer
+                        * write data to saif data register to trigger
+                        * the transfer.
+                        * For 24-bit format the 32-bit FIFO register stores
+                        * only one channel, so we need to write twice.
+                        * This is also safe for the other non 24-bit formats.
                         */
                        __raw_writel(0, saif->base + SAIF_DATA);
+                       __raw_writel(0, saif->base + SAIF_DATA);
                } else {
                        /*
-                        * read a data from saif data register to trigger
-                        * the receive
+                        * read data from saif data register to trigger
+                        * the receive.
+                        * For 24-bit format the 32-bit FIFO register stores
+                        * only one channel, so we need to read twice.
+                        * This is also safe for the other non 24-bit formats.
                         */
                        __raw_readl(saif->base + SAIF_DATA);
+                       __raw_readl(saif->base + SAIF_DATA);
                }
 
                master_saif->ongoing = 1;
@@ -812,3 +820,4 @@ module_platform_driver(mxs_saif_driver);
 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
 MODULE_DESCRIPTION("MXS ASoC SAIF driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-saif");
index e7b8317..3c7c3a5 100644 (file)
@@ -207,6 +207,8 @@ config SND_SOC_BELLS
        select SND_SOC_WM5102
        select SND_SOC_WM5110
        select SND_SOC_WM9081
+       select SND_SOC_WM0010
+       select SND_SOC_WM1250_EV1
 
 config SND_SOC_LOWLAND
        tristate "Audio support for Wolfson Lowland"
index b0d46d6..a2ca156 100644 (file)
@@ -212,7 +212,7 @@ static struct snd_soc_dai_link bells_dai_wm5102[] = {
        {
                .name = "Sub",
                .stream_name = "Sub",
-               .cpu_dai_name = "wm5110-aif3",
+               .cpu_dai_name = "wm5102-aif3",
                .codec_dai_name = "wm9081-hifi",
                .codec_name = "wm9081.1-006c",
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
index d119862..10d21be 100644 (file)
@@ -2786,8 +2786,9 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
        val = (ucontrol->value.integer.value[0] + min) & mask;
        val = val << shift;
 
-       if (snd_soc_update_bits_locked(codec, reg, val_mask, val))
-                       return err;
+       err = snd_soc_update_bits_locked(codec, reg, val_mask, val);
+       if (err < 0)
+               return err;
 
        if (snd_soc_volsw_is_stereo(mc)) {
                val_mask = mask << rshift;
index d0a4be3..6e35bca 100644 (file)
@@ -3745,7 +3745,7 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card)
 {
        struct snd_soc_codec *codec;
 
-       list_for_each_entry(codec, &card->codec_dev_list, list) {
+       list_for_each_entry(codec, &card->codec_dev_list, card_list) {
                soc_dapm_shutdown_codec(&codec->dapm);
                if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY)
                        snd_soc_dapm_set_bias_level(&codec->dapm,
index 282f0fc..dbf7999 100644 (file)
@@ -559,9 +559,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
                return;
 
        card = chip->card;
-       mutex_lock(&register_mutex);
        down_write(&chip->shutdown_rwsem);
        chip->shutdown = 1;
+       up_write(&chip->shutdown_rwsem);
+
+       mutex_lock(&register_mutex);
        chip->num_interfaces--;
        if (chip->num_interfaces <= 0) {
                snd_card_disconnect(card);
@@ -582,11 +584,9 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
                        snd_usb_mixer_disconnect(p);
                }
                usb_chip[chip->index] = NULL;
-               up_write(&chip->shutdown_rwsem);
                mutex_unlock(&register_mutex);
                snd_card_free_when_closed(card);
        } else {
-               up_write(&chip->shutdown_rwsem);
                mutex_unlock(&register_mutex);
        }
 }
index 7f78c6d..34de6f2 100644 (file)
@@ -35,6 +35,7 @@
 
 #define EP_FLAG_ACTIVATED      0
 #define EP_FLAG_RUNNING                1
+#define EP_FLAG_STOPPING       2
 
 /*
  * snd_usb_endpoint is a model that abstracts everything related to an
@@ -502,10 +503,20 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep)
        if (alive)
                snd_printk(KERN_ERR "timeout: still %d active urbs on EP #%x\n",
                                        alive, ep->ep_num);
+       clear_bit(EP_FLAG_STOPPING, &ep->flags);
 
        return 0;
 }
 
+/* sync the pending stop operation;
+ * this function itself doesn't trigger the stop operation
+ */
+void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep)
+{
+       if (ep && test_bit(EP_FLAG_STOPPING, &ep->flags))
+               wait_clear_urbs(ep);
+}
+
 /*
  * unlink active urbs.
  */
@@ -918,6 +929,8 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
 
                if (wait)
                        wait_clear_urbs(ep);
+               else
+                       set_bit(EP_FLAG_STOPPING, &ep->flags);
        }
 }
 
index 6376ccf..3d4c970 100644 (file)
@@ -19,6 +19,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
 int  snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep);
 void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
                           int force, int can_sleep, int wait);
+void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
 int  snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
 int  snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_free(struct list_head *head);
index c83f614..eeefbce 100644 (file)
@@ -148,6 +148,7 @@ struct snd_usb_midi_out_endpoint {
                struct snd_usb_midi_out_endpoint* ep;
                struct snd_rawmidi_substream *substream;
                int active;
+               bool autopm_reference;
                uint8_t cable;          /* cable number << 4 */
                uint8_t state;
 #define STATE_UNKNOWN  0
@@ -1076,7 +1077,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
                return -ENXIO;
        }
        err = usb_autopm_get_interface(umidi->iface);
-       if (err < 0)
+       port->autopm_reference = err >= 0;
+       if (err < 0 && err != -EACCES)
                return -EIO;
        substream->runtime->private_data = port;
        port->state = STATE_UNKNOWN;
@@ -1087,9 +1089,11 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
 static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
 {
        struct snd_usb_midi* umidi = substream->rmidi->private_data;
+       struct usbmidi_out_port *port = substream->runtime->private_data;
 
        substream_open(substream, 0);
-       usb_autopm_put_interface(umidi->iface);
+       if (port->autopm_reference)
+               usb_autopm_put_interface(umidi->iface);
        return 0;
 }
 
index 37428f7..ef6fa24 100644 (file)
@@ -459,7 +459,7 @@ static int configure_endpoint(struct snd_usb_substream *subs)
                return ret;
 
        if (subs->sync_endpoint)
-               ret = snd_usb_endpoint_set_params(subs->data_endpoint,
+               ret = snd_usb_endpoint_set_params(subs->sync_endpoint,
                                                  subs->pcm_format,
                                                  subs->channels,
                                                  subs->period_bytes,
@@ -568,6 +568,9 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
                goto unlock;
        }
 
+       snd_usb_endpoint_sync_pending_stop(subs->sync_endpoint);
+       snd_usb_endpoint_sync_pending_stop(subs->data_endpoint);
+
        ret = set_format(subs, subs->cur_audiofmt);
        if (ret < 0)
                goto unlock;
index 2655ae9..ea095ab 100644 (file)
@@ -206,8 +206,10 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
        retval = pread(fd, msr, sizeof *msr, offset);
        close(fd);
 
-       if (retval != sizeof *msr)
+       if (retval != sizeof *msr) {
+               fprintf(stderr, "%s offset 0x%llx read failed\n", pathname, (unsigned long long)offset);
                return -1;
+       }
 
        return 0;
 }
@@ -1101,7 +1103,9 @@ void turbostat_loop()
 
 restart:
        retval = for_all_cpus(get_counters, EVEN_COUNTERS);
-       if (retval) {
+       if (retval < -1) {
+               exit(retval);
+       } else if (retval == -1) {
                re_initialize();
                goto restart;
        }
@@ -1114,7 +1118,9 @@ restart:
                }
                sleep(interval_sec);
                retval = for_all_cpus(get_counters, ODD_COUNTERS);
-               if (retval) {
+               if (retval < -1) {
+                       exit(retval);
+               } else if (retval == -1) {
                        re_initialize();
                        goto restart;
                }
@@ -1126,7 +1132,9 @@ restart:
                flush_stdout();
                sleep(interval_sec);
                retval = for_all_cpus(get_counters, EVEN_COUNTERS);
-               if (retval) {
+               if (retval < -1) {
+                       exit(retval);
+               } else if (retval == -1) {
                        re_initialize();
                        goto restart;
                }
@@ -1545,8 +1553,11 @@ void turbostat_init()
 int fork_it(char **argv)
 {
        pid_t child_pid;
+       int status;
 
-       for_all_cpus(get_counters, EVEN_COUNTERS);
+       status = for_all_cpus(get_counters, EVEN_COUNTERS);
+       if (status)
+               exit(status);
        /* clear affinity side-effect of get_counters() */
        sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
        gettimeofday(&tv_even, (struct timezone *)NULL);
@@ -1556,7 +1567,6 @@ int fork_it(char **argv)
                /* child */
                execvp(argv[0], argv);
        } else {
-               int status;
 
                /* parent */
                if (child_pid == -1) {
@@ -1568,7 +1578,7 @@ int fork_it(char **argv)
                signal(SIGQUIT, SIG_IGN);
                if (waitpid(child_pid, &status, 0) == -1) {
                        perror("wait");
-                       exit(1);
+                       exit(1);
                }
        }
        /*
@@ -1585,7 +1595,7 @@ int fork_it(char **argv)
 
        fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
 
-       return 0;
+       return WEXITSTATUS(status);
 }
 
 void cmdline(int argc, char **argv)
@@ -1594,7 +1604,7 @@ void cmdline(int argc, char **argv)
 
        progname = argv[0];
 
-       while ((opt = getopt(argc, argv, "+pPSvisc:sC:m:M:")) != -1) {
+       while ((opt = getopt(argc, argv, "+pPSvi:sc:sC:m:M:")) != -1) {
                switch (opt) {
                case 'p':
                        show_core_only++;
index 4348014..85baf11 100644 (file)
@@ -1,4 +1,4 @@
-TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug epoll
+TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug
 
 all:
        for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/epoll/Makefile b/tools/testing/selftests/epoll/Makefile
deleted file mode 100644 (file)
index 19806ed..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-# Makefile for epoll selftests
-
-all: test_epoll
-%: %.c
-       gcc -pthread -g -o $@ $^
-
-run_tests: all
-       ./test_epoll
-
-clean:
-       $(RM) test_epoll
diff --git a/tools/testing/selftests/epoll/test_epoll.c b/tools/testing/selftests/epoll/test_epoll.c
deleted file mode 100644 (file)
index f752539..0000000
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- *  tools/testing/selftests/epoll/test_epoll.c
- *
- *  Copyright 2012 Adobe Systems Incorporated
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  Paton J. Lewis <palewis@adobe.com>
- *
- */
-
-#include <errno.h>
-#include <fcntl.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/epoll.h>
-#include <sys/socket.h>
-
-/*
- * A pointer to an epoll_item_private structure will be stored in the epoll
- * item's event structure so that we can get access to the epoll_item_private
- * data after calling epoll_wait:
- */
-struct epoll_item_private {
-       int index;  /* Position of this struct within the epoll_items array. */
-       int fd;
-       uint32_t events;
-       pthread_mutex_t mutex;  /* Guards the following variables... */
-       int stop;
-       int status;  /* Stores any error encountered while handling item. */
-       /* The following variable allows us to test whether we have encountered
-          a problem while attempting to cancel and delete the associated
-          event. When the test program exits, 'deleted' should be exactly
-          one. If it is greater than one, then the failed test reflects a real
-          world situation where we would have tried to access the epoll item's
-          private data after deleting it: */
-       int deleted;
-};
-
-struct epoll_item_private *epoll_items;
-
-/*
- * Delete the specified item from the epoll set. In a real-world secneario this
- * is where we would free the associated data structure, but in this testing
- * environment we retain the structure so that we can test for double-deletion:
- */
-void delete_item(int index)
-{
-       __sync_fetch_and_add(&epoll_items[index].deleted, 1);
-}
-
-/*
- * A pointer to a read_thread_data structure will be passed as the argument to
- * each read thread:
- */
-struct read_thread_data {
-       int stop;
-       int status;  /* Indicates any error encountered by the read thread. */
-       int epoll_set;
-};
-
-/*
- * The function executed by the read threads:
- */
-void *read_thread_function(void *function_data)
-{
-       struct read_thread_data *thread_data =
-               (struct read_thread_data *)function_data;
-       struct epoll_event event_data;
-       struct epoll_item_private *item_data;
-       char socket_data;
-
-       /* Handle events until we encounter an error or this thread's 'stop'
-          condition is set: */
-       while (1) {
-               int result = epoll_wait(thread_data->epoll_set,
-                                       &event_data,
-                                       1,      /* Number of desired events */
-                                       1000);  /* Timeout in ms */
-               if (result < 0) {
-                       /* Breakpoints signal all threads. Ignore that while
-                          debugging: */
-                       if (errno == EINTR)
-                               continue;
-                       thread_data->status = errno;
-                       return 0;
-               } else if (thread_data->stop)
-                       return 0;
-               else if (result == 0)  /* Timeout */
-                       continue;
-
-               /* We need the mutex here because checking for the stop
-                  condition and re-enabling the epoll item need to be done
-                  together as one atomic operation when EPOLL_CTL_DISABLE is
-                  available: */
-               item_data = (struct epoll_item_private *)event_data.data.ptr;
-               pthread_mutex_lock(&item_data->mutex);
-
-               /* Remove the item from the epoll set if we want to stop
-                  handling that event: */
-               if (item_data->stop)
-                       delete_item(item_data->index);
-               else {
-                       /* Clear the data that was written to the other end of
-                          our non-blocking socket: */
-                       do {
-                               if (read(item_data->fd, &socket_data, 1) < 1) {
-                                       if ((errno == EAGAIN) ||
-                                           (errno == EWOULDBLOCK))
-                                               break;
-                                       else
-                                               goto error_unlock;
-                               }
-                       } while (item_data->events & EPOLLET);
-
-                       /* The item was one-shot, so re-enable it: */
-                       event_data.events = item_data->events;
-                       if (epoll_ctl(thread_data->epoll_set,
-                                                 EPOLL_CTL_MOD,
-                                                 item_data->fd,
-                                                 &event_data) < 0)
-                               goto error_unlock;
-               }
-
-               pthread_mutex_unlock(&item_data->mutex);
-       }
-
-error_unlock:
-       thread_data->status = item_data->status = errno;
-       pthread_mutex_unlock(&item_data->mutex);
-       return 0;
-}
-
-/*
- * A pointer to a write_thread_data structure will be passed as the argument to
- * the write thread:
- */
-struct write_thread_data {
-       int stop;
-       int status;  /* Indicates any error encountered by the write thread. */
-       int n_fds;
-       int *fds;
-};
-
-/*
- * The function executed by the write thread. It writes a single byte to each
- * socket in turn until the stop condition for this thread is set. If writing to
- * a socket would block (i.e. errno was EAGAIN), we leave that socket alone for
- * the moment and just move on to the next socket in the list. We don't care
- * about the order in which we deliver events to the epoll set. In fact we don't
- * care about the data we're writing to the pipes at all; we just want to
- * trigger epoll events:
- */
-void *write_thread_function(void *function_data)
-{
-       const char data = 'X';
-       int index;
-       struct write_thread_data *thread_data =
-               (struct write_thread_data *)function_data;
-       while (!thread_data->stop)
-               for (index = 0;
-                    !thread_data->stop && (index < thread_data->n_fds);
-                    ++index)
-                       if ((write(thread_data->fds[index], &data, 1) < 1) &&
-                               (errno != EAGAIN) &&
-                               (errno != EWOULDBLOCK)) {
-                               thread_data->status = errno;
-                               return;
-                       }
-}
-
-/*
- * Arguments are currently ignored:
- */
-int main(int argc, char **argv)
-{
-       const int n_read_threads = 100;
-       const int n_epoll_items = 500;
-       int index;
-       int epoll_set = epoll_create1(0);
-       struct write_thread_data write_thread_data = {
-               0, 0, n_epoll_items, malloc(n_epoll_items * sizeof(int))
-       };
-       struct read_thread_data *read_thread_data =
-               malloc(n_read_threads * sizeof(struct read_thread_data));
-       pthread_t *read_threads = malloc(n_read_threads * sizeof(pthread_t));
-       pthread_t write_thread;
-
-       printf("-----------------\n");
-       printf("Runing test_epoll\n");
-       printf("-----------------\n");
-
-       epoll_items = malloc(n_epoll_items * sizeof(struct epoll_item_private));
-
-       if (epoll_set < 0 || epoll_items == 0 || write_thread_data.fds == 0 ||
-               read_thread_data == 0 || read_threads == 0)
-               goto error;
-
-       if (sysconf(_SC_NPROCESSORS_ONLN) < 2) {
-               printf("Error: please run this test on a multi-core system.\n");
-               goto error;
-       }
-
-       /* Create the socket pairs and epoll items: */
-       for (index = 0; index < n_epoll_items; ++index) {
-               int socket_pair[2];
-               struct epoll_event event_data;
-               if (socketpair(AF_UNIX,
-                              SOCK_STREAM | SOCK_NONBLOCK,
-                              0,
-                              socket_pair) < 0)
-                       goto error;
-               write_thread_data.fds[index] = socket_pair[0];
-               epoll_items[index].index = index;
-               epoll_items[index].fd = socket_pair[1];
-               if (pthread_mutex_init(&epoll_items[index].mutex, NULL) != 0)
-                       goto error;
-               /* We always use EPOLLONESHOT because this test is currently
-                  structured to demonstrate the need for EPOLL_CTL_DISABLE,
-                  which only produces useful information in the EPOLLONESHOT
-                  case (without EPOLLONESHOT, calling epoll_ctl with
-                  EPOLL_CTL_DISABLE will never return EBUSY). If support for
-                  testing events without EPOLLONESHOT is desired, it should
-                  probably be implemented in a separate unit test. */
-               epoll_items[index].events = EPOLLIN | EPOLLONESHOT;
-               if (index < n_epoll_items / 2)
-                       epoll_items[index].events |= EPOLLET;
-               epoll_items[index].stop = 0;
-               epoll_items[index].status = 0;
-               epoll_items[index].deleted = 0;
-               event_data.events = epoll_items[index].events;
-               event_data.data.ptr = &epoll_items[index];
-               if (epoll_ctl(epoll_set,
-                             EPOLL_CTL_ADD,
-                             epoll_items[index].fd,
-                             &event_data) < 0)
-                       goto error;
-       }
-
-       /* Create and start the read threads: */
-       for (index = 0; index < n_read_threads; ++index) {
-               read_thread_data[index].stop = 0;
-               read_thread_data[index].status = 0;
-               read_thread_data[index].epoll_set = epoll_set;
-               if (pthread_create(&read_threads[index],
-                                  NULL,
-                                  read_thread_function,
-                                  &read_thread_data[index]) != 0)
-                       goto error;
-       }
-
-       if (pthread_create(&write_thread,
-                          NULL,
-                          write_thread_function,
-                          &write_thread_data) != 0)
-               goto error;
-
-       /* Cancel all event pollers: */
-#ifdef EPOLL_CTL_DISABLE
-       for (index = 0; index < n_epoll_items; ++index) {
-               pthread_mutex_lock(&epoll_items[index].mutex);
-               ++epoll_items[index].stop;
-               if (epoll_ctl(epoll_set,
-                             EPOLL_CTL_DISABLE,
-                             epoll_items[index].fd,
-                             NULL) == 0)
-                       delete_item(index);
-               else if (errno != EBUSY) {
-                       pthread_mutex_unlock(&epoll_items[index].mutex);
-                       goto error;
-               }
-               /* EBUSY means events were being handled; allow the other thread
-                  to delete the item. */
-               pthread_mutex_unlock(&epoll_items[index].mutex);
-       }
-#else
-       for (index = 0; index < n_epoll_items; ++index) {
-               pthread_mutex_lock(&epoll_items[index].mutex);
-               ++epoll_items[index].stop;
-               pthread_mutex_unlock(&epoll_items[index].mutex);
-               /* Wait in case a thread running read_thread_function is
-                  currently executing code between epoll_wait and
-                  pthread_mutex_lock with this item. Note that a longer delay
-                  would make double-deletion less likely (at the expense of
-                  performance), but there is no guarantee that any delay would
-                  ever be sufficient. Note also that we delete all event
-                  pollers at once for testing purposes, but in a real-world
-                  environment we are likely to want to be able to cancel event
-                  pollers at arbitrary times. Therefore we can't improve this
-                  situation by just splitting this loop into two loops
-                  (i.e. signal 'stop' for all items, sleep, and then delete all
-                  items). We also can't fix the problem via EPOLL_CTL_DEL
-                  because that command can't prevent the case where some other
-                  thread is executing read_thread_function within the region
-                  mentioned above: */
-               usleep(1);
-               pthread_mutex_lock(&epoll_items[index].mutex);
-               if (!epoll_items[index].deleted)
-                       delete_item(index);
-               pthread_mutex_unlock(&epoll_items[index].mutex);
-       }
-#endif
-
-       /* Shut down the read threads: */
-       for (index = 0; index < n_read_threads; ++index)
-               __sync_fetch_and_add(&read_thread_data[index].stop, 1);
-       for (index = 0; index < n_read_threads; ++index) {
-               if (pthread_join(read_threads[index], NULL) != 0)
-                       goto error;
-               if (read_thread_data[index].status)
-                       goto error;
-       }
-
-       /* Shut down the write thread: */
-       __sync_fetch_and_add(&write_thread_data.stop, 1);
-       if ((pthread_join(write_thread, NULL) != 0) || write_thread_data.status)
-               goto error;
-
-       /* Check for final error conditions: */
-       for (index = 0; index < n_epoll_items; ++index) {
-               if (epoll_items[index].status != 0)
-                       goto error;
-               if (pthread_mutex_destroy(&epoll_items[index].mutex) < 0)
-                       goto error;
-       }
-       for (index = 0; index < n_epoll_items; ++index)
-               if (epoll_items[index].deleted != 1) {
-                       printf("Error: item data deleted %1d times.\n",
-                                  epoll_items[index].deleted);
-                       goto error;
-               }
-
-       printf("[PASS]\n");
-       return 0;
-
- error:
-       printf("[FAIL]\n");
-       return errno;
-}