Merge branch 'drm-core-next' of git://people.freedesktop.org/~airlied/linux
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 10 Jan 2012 19:04:36 +0000 (11:04 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 10 Jan 2012 19:04:36 +0000 (11:04 -0800)
* 'drm-core-next' of git://people.freedesktop.org/~airlied/linux: (307 commits)
  drm/nouveau/pm: fix build with HWMON off
  gma500: silence gcc warnings in mid_get_vbt_data()
  drm/ttm: fix condition (and vs or)
  drm/radeon: double lock typo in radeon_vm_bo_rmv()
  drm/radeon: use after free in radeon_vm_bo_add()
  drm/sis|via: don't return stack garbage from free_mem ioctl
  drm/radeon/kms: remove pointless CS flags priority struct
  drm/radeon/kms: check if vm is supported in VA ioctl
  drm: introduce drm_can_sleep and use in intel/radeon drivers. (v2)
  radeon: Fix disabling PCI bus mastering on big endian hosts.
  ttm: fix agp since ttm tt rework
  agp: Fix multi-line warning message whitespace
  drm/ttm/dma: Fix accounting error when calling ttm_mem_global_free_page and don't try to free freed pages.
  drm/ttm/dma: Only call set_pages_array_wb when the page is not in WB pool.
  drm/radeon/kms: sync across multiple rings when doing bo moves v3
  drm/radeon/kms: Add support for multi-ring sync in CS ioctl (v2)
  drm/radeon: GPU virtual memory support v22
  drm: make DRM_UNLOCKED ioctls with their own mutex
  drm: no need to hold global mutex for static data
  drm/radeon/benchmark: common modes sweep ignores 640x480@32
  ...

Fix up trivial conflicts in radeon/evergreen.c and vmwgfx/vmwgfx_kms.c

288 files changed:
drivers/char/agp/generic.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_edid_modes.h
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_lock.c
drivers/gpu/drm/drm_sman.c [deleted file]
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/Makefile
drivers/gpu/drm/exynos/exynos_ddc.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_buf.c
drivers/gpu/drm/exynos/exynos_drm_buf.h
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_encoder.c
drivers/gpu/drm/exynos/exynos_drm_encoder.h
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fb.h
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_hdmi.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_hdmi.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_plane.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_plane.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_hdmi.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_hdmi.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_hdmiphy.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_mixer.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_mixer.h [new file with mode: 0644]
drivers/gpu/drm/exynos/regs-hdmi.h [new file with mode: 0644]
drivers/gpu/drm/exynos/regs-mixer.h [new file with mode: 0644]
drivers/gpu/drm/exynos/regs-vp.h [new file with mode: 0644]
drivers/gpu/drm/gma500/Kconfig [new file with mode: 0644]
drivers/gpu/drm/gma500/Makefile [new file with mode: 0644]
drivers/gpu/drm/gma500/accel_2d.c [new file with mode: 0644]
drivers/gpu/drm/gma500/backlight.c [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_device.c [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_device.h [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_intel_crt.c [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_intel_display.c [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_intel_hdmi.c [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_intel_lvds.c [new file with mode: 0644]
drivers/gpu/drm/gma500/framebuffer.c [new file with mode: 0644]
drivers/gpu/drm/gma500/framebuffer.h [new file with mode: 0644]
drivers/gpu/drm/gma500/gem.c [new file with mode: 0644]
drivers/gpu/drm/gma500/gem_glue.c [new file with mode: 0644]
drivers/gpu/drm/gma500/gem_glue.h [new file with mode: 0644]
drivers/gpu/drm/gma500/gtt.c [new file with mode: 0644]
drivers/gpu/drm/gma500/gtt.h [new file with mode: 0644]
drivers/gpu/drm/gma500/intel_bios.c [new file with mode: 0644]
drivers/gpu/drm/gma500/intel_bios.h [new file with mode: 0644]
drivers/gpu/drm/gma500/intel_gmbus.c [new file with mode: 0644]
drivers/gpu/drm/gma500/intel_i2c.c [new file with mode: 0644]
drivers/gpu/drm/gma500/intel_opregion.c [new file with mode: 0644]
drivers/gpu/drm/gma500/mid_bios.c [new file with mode: 0644]
drivers/gpu/drm/gma500/mid_bios.h [new file with mode: 0644]
drivers/gpu/drm/gma500/mmu.c [new file with mode: 0644]
drivers/gpu/drm/gma500/oaktrail.h [new file with mode: 0644]
drivers/gpu/drm/gma500/oaktrail_crtc.c [new file with mode: 0644]
drivers/gpu/drm/gma500/oaktrail_device.c [new file with mode: 0644]
drivers/gpu/drm/gma500/oaktrail_hdmi.c [new file with mode: 0644]
drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c [new file with mode: 0644]
drivers/gpu/drm/gma500/oaktrail_lvds.c [new file with mode: 0644]
drivers/gpu/drm/gma500/power.c [new file with mode: 0644]
drivers/gpu/drm/gma500/power.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_device.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_drv.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_drv.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_display.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_display.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_drv.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_lvds.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_modes.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_reg.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_sdvo.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_irq.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_irq.h [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_lid.c [new file with mode: 0644]
drivers/gpu/drm/gma500/psb_reg.h [new file with mode: 0644]
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i810/i810_drv.c
drivers/gpu/drm/i810/i810_drv.h
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sprite.c [new file with mode: 0644]
drivers/gpu/drm/mga/mga_drv.c
drivers/gpu/drm/nouveau/Makefile
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bios.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_channel.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_connector.h
drivers/gpu/drm/nouveau/nouveau_crtc.h
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_dma.c
drivers/gpu/drm/nouveau/nouveau_dp.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fb.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_gpio.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_gpio.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_hdmi.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_hwsq.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_i2c.c
drivers/gpu/drm/nouveau/nouveau_i2c.h
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_mxm.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_notifier.c
drivers/gpu/drm/nouveau/nouveau_object.c
drivers/gpu/drm/nouveau/nouveau_perf.c
drivers/gpu/drm/nouveau/nouveau_pm.c
drivers/gpu/drm/nouveau/nouveau_pm.h
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/nouveau/nouveau_temp.c
drivers/gpu/drm/nouveau/nouveau_vm.c
drivers/gpu/drm/nouveau/nouveau_vm.h
drivers/gpu/drm/nouveau/nouveau_volt.c
drivers/gpu/drm/nouveau/nv04_crtc.c
drivers/gpu/drm/nouveau/nv04_dac.c
drivers/gpu/drm/nouveau/nv04_dfp.c
drivers/gpu/drm/nouveau/nv04_display.c
drivers/gpu/drm/nouveau/nv04_pm.c
drivers/gpu/drm/nouveau/nv04_timer.c
drivers/gpu/drm/nouveau/nv10_gpio.c
drivers/gpu/drm/nouveau/nv17_tv.c
drivers/gpu/drm/nouveau/nv40_pm.c
drivers/gpu/drm/nouveau/nv50_crtc.c
drivers/gpu/drm/nouveau/nv50_dac.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv50_display.h
drivers/gpu/drm/nouveau/nv50_evo.c
drivers/gpu/drm/nouveau/nv50_fifo.c
drivers/gpu/drm/nouveau/nv50_gpio.c
drivers/gpu/drm/nouveau/nv50_graph.c
drivers/gpu/drm/nouveau/nv50_pm.c
drivers/gpu/drm/nouveau/nv50_sor.c
drivers/gpu/drm/nouveau/nv50_vm.c
drivers/gpu/drm/nouveau/nv84_bsp.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nv84_vp.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nv98_crypt.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nv98_ppp.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nva3_copy.fuc
drivers/gpu/drm/nouveau/nva3_copy.fuc.h
drivers/gpu/drm/nouveau/nva3_pm.c
drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
drivers/gpu/drm/nouveau/nvc0_graph.c
drivers/gpu/drm/nouveau/nvc0_graph.fuc
drivers/gpu/drm/nouveau/nvc0_graph.h
drivers/gpu/drm/nouveau/nvc0_grctx.c
drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
drivers/gpu/drm/nouveau/nvc0_grhub.fuc
drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
drivers/gpu/drm/nouveau/nvc0_pm.c
drivers/gpu/drm/nouveau/nvd0_display.c
drivers/gpu/drm/r128/r128_drv.c
drivers/gpu/drm/radeon/Makefile
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_blit_kms.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r500_reg.h
drivers/gpu/drm/radeon/r520.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/r600_cp.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_benchmark.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_sa.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_semaphore.c [new file with mode: 0644]
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/savage/savage_drv.c
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/sis/sis_drv.h
drivers/gpu/drm/sis/sis_mm.c
drivers/gpu/drm/tdfx/tdfx_drv.c
drivers/gpu/drm/ttm/Makefile
drivers/gpu/drm/ttm/ttm_agp_backend.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/via/via_drv.c
drivers/gpu/drm/via/via_drv.h
drivers/gpu/drm/via/via_map.c
drivers/gpu/drm/via/via_mm.c
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/staging/gma500/accel_2d.c
drivers/staging/gma500/cdv_intel_display.c
drivers/staging/gma500/framebuffer.c
drivers/staging/gma500/mdfld_intel_display.c
drivers/staging/gma500/mrst_crtc.c
drivers/staging/gma500/psb_drv.c
drivers/staging/gma500/psb_intel_display.c
drivers/xen/swiotlb-xen.c
include/drm/Kbuild
include/drm/drm.h
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_fourcc.h [new file with mode: 0644]
include/drm/drm_mode.h
include/drm/drm_sman.h [deleted file]
include/drm/exynos_drm.h
include/drm/gma_drm.h [new file with mode: 0644]
include/drm/i915_drm.h
include/drm/radeon_drm.h
include/drm/sis_drm.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_page_alloc.h
include/drm/via_drm.h
include/linux/swiotlb.h
lib/swiotlb.c

diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index b072648..17e05d1 100644
@@ -514,12 +514,12 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
        switch (*bridge_agpstat & 7) {
        case 4:
                *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
-               printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
+               printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate "
                        "Fixing up support for x2 & x1\n");
                break;
        case 2:
                *bridge_agpstat |= AGPSTAT2_1X;
-               printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
+               printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate "
                        "Fixing up support for x1\n");
                break;
        default:
@@ -693,7 +693,7 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
                        *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
                        *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
                } else {
-                       printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
+                       printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
                        if (!(*bridge_agpstat & AGPSTAT3_8X)) {
                                printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
                                        *bridge_agpstat, origbridge);
@@ -956,7 +956,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
        bridge->driver->cache_flush();
 #ifdef CONFIG_X86
        if (set_memory_uc((unsigned long)table, 1 << page_order))
-               printk(KERN_WARNING "Could not set GATT table memory to UC!");
+               printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
 
        bridge->gatt_table = (void *)table;
 #else
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1368826..2418429 100644
@@ -162,3 +162,6 @@ config DRM_SAVAGE
 source "drivers/gpu/drm/exynos/Kconfig"
 
 source "drivers/gpu/drm/vmwgfx/Kconfig"
+
+source "drivers/gpu/drm/gma500/Kconfig"
+
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c0496f6..0cde1b8 100644
@@ -9,7 +9,7 @@ drm-y       :=  drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
                drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
                drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
                drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
-               drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+               drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
                drm_crtc.o drm_modes.o drm_edid.o \
                drm_info.o drm_debugfs.o drm_encoder_slave.o \
                drm_trace_points.o drm_global.o drm_usb.o
@@ -36,4 +36,5 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA)  +=via/
 obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
 obj-$(CONFIG_DRM_EXYNOS) +=exynos/
+obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-y                  += i2c/
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 6d440fb..325365f 100644
@@ -154,8 +154,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       mutex_unlock(&dev->struct_mutex);
-
        request->handle = NULL;
        list_for_each_entry(_entry, &dev->maplist, head) {
                if (_entry->map == map) {
@@ -164,6 +162,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
                        break;
                }
        }
+
+       mutex_unlock(&dev->struct_mutex);
+
        if (request->handle == NULL)
                return -EINVAL;
 
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8323fc3..5e818a8 100644
@@ -36,6 +36,7 @@
 #include "drmP.h"
 #include "drm_crtc.h"
 #include "drm_edid.h"
+#include "drm_fourcc.h"
 
 struct drm_prop_enum_list {
        int type;
@@ -324,6 +325,7 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 {
        struct drm_device *dev = fb->dev;
        struct drm_crtc *crtc;
+       struct drm_plane *plane;
        struct drm_mode_set set;
        int ret;
 
@@ -340,6 +342,18 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
                }
        }
 
+       list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+               if (plane->fb == fb) {
+                       /* should turn off the crtc */
+                       ret = plane->funcs->disable_plane(plane);
+                       if (ret)
+                               DRM_ERROR("failed to disable plane with busy fb\n");
+                       /* disconnect the plane from the fb and crtc: */
+                       plane->fb = NULL;
+                       plane->crtc = NULL;
+               }
+       }
+
        drm_mode_object_put(dev, &fb->base);
        list_del(&fb->head);
        dev->mode_config.num_fb--;
@@ -540,6 +554,63 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
 }
 EXPORT_SYMBOL(drm_encoder_cleanup);
 
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+                  unsigned long possible_crtcs,
+                  const struct drm_plane_funcs *funcs,
+                  const uint32_t *formats, uint32_t format_count,
+                  bool priv)
+{
+       mutex_lock(&dev->mode_config.mutex);
+
+       plane->dev = dev;
+       drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+       plane->funcs = funcs;
+       plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
+                                     GFP_KERNEL);
+       if (!plane->format_types) {
+               DRM_DEBUG_KMS("out of memory when allocating plane\n");
+               drm_mode_object_put(dev, &plane->base);
+               mutex_unlock(&dev->mode_config.mutex);
+               return -ENOMEM;
+       }
+
+       memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+       plane->format_count = format_count;
+       plane->possible_crtcs = possible_crtcs;
+
+       /* private planes are not exposed to userspace, but depending on
+        * display hardware, might be convenient to allow sharing programming
+        * for the scanout engine with the crtc implementation.
+        */
+       if (!priv) {
+               list_add_tail(&plane->head, &dev->mode_config.plane_list);
+               dev->mode_config.num_plane++;
+       } else {
+               INIT_LIST_HEAD(&plane->head);
+       }
+
+       mutex_unlock(&dev->mode_config.mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_plane_init);
+
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+       struct drm_device *dev = plane->dev;
+
+       mutex_lock(&dev->mode_config.mutex);
+       kfree(plane->format_types);
+       drm_mode_object_put(dev, &plane->base);
+       /* if not added to a list, it must be a private plane */
+       if (!list_empty(&plane->head)) {
+               list_del(&plane->head);
+               dev->mode_config.num_plane--;
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
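
For readers new to the plane API introduced in this series, a minimal, hypothetical sketch of how a KMS driver could register a single overlay plane with the two helpers above; my_plane, my_formats and my_plane_funcs are illustrative names, not code from this merge, and the funcs table is assumed to be filled by the driver with the update_plane/disable_plane/destroy callbacks declared in drm_crtc.h.

        #include "drmP.h"
        #include "drm_crtc.h"
        #include "drm_fourcc.h"

        /* Illustrative only, not part of this merge. */
        static const uint32_t my_formats[] = {
                DRM_FORMAT_XRGB8888,
                DRM_FORMAT_YUYV,
        };

        /* Assumed to be defined by the driver with update_plane,
         * disable_plane and destroy callbacks. */
        extern const struct drm_plane_funcs my_plane_funcs;

        static struct drm_plane my_plane; /* usually embedded in a driver object */

        static int my_driver_plane_init(struct drm_device *dev)
        {
                /* bit 0 of possible_crtcs = first CRTC; priv = false exposes
                 * the plane to userspace via GETPLANERESOURCES. */
                return drm_plane_init(dev, &my_plane, 0x1, &my_plane_funcs,
                                      my_formats, ARRAY_SIZE(my_formats), false);
        }

        static void my_driver_plane_fini(struct drm_device *dev)
        {
                drm_plane_cleanup(&my_plane);
        }
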
+
 /**
  * drm_mode_create - create a new display mode
  * @dev: DRM device
@@ -871,6 +942,7 @@ void drm_mode_config_init(struct drm_device *dev)
        INIT_LIST_HEAD(&dev->mode_config.encoder_list);
        INIT_LIST_HEAD(&dev->mode_config.property_list);
        INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+       INIT_LIST_HEAD(&dev->mode_config.plane_list);
        idr_init(&dev->mode_config.crtc_idr);
 
        mutex_lock(&dev->mode_config.mutex);
@@ -947,6 +1019,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
        struct drm_encoder *encoder, *enct;
        struct drm_framebuffer *fb, *fbt;
        struct drm_property *property, *pt;
+       struct drm_plane *plane, *plt;
 
        list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
                                 head) {
@@ -971,6 +1044,10 @@ void drm_mode_config_cleanup(struct drm_device *dev)
                crtc->funcs->destroy(crtc);
        }
 
+       list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+                                head) {
+               plane->funcs->destroy(plane);
+       }
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
 
@@ -1379,7 +1456,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
         */
        if ((out_resp->count_modes >= mode_count) && mode_count) {
                copied = 0;
-               mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+               mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
                list_for_each_entry(mode, &connector->modes, head) {
                        drm_crtc_convert_to_umode(&u_mode, mode);
                        if (copy_to_user(mode_ptr + copied,
@@ -1394,8 +1471,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
        if ((out_resp->count_props >= props_count) && props_count) {
                copied = 0;
-               prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
-               prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+               prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
+               prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
                for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
                        if (connector->property_ids[i] != 0) {
                                if (put_user(connector->property_ids[i],
@@ -1417,7 +1494,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
        if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
                copied = 0;
-               encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+               encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
                for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
                        if (connector->encoder_ids[i] != 0) {
                                if (put_user(connector->encoder_ids[i],
@@ -1471,6 +1548,245 @@ out:
 }
 
 /**
+ * drm_mode_getplane_res - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return a plane count and set of IDs.
+ */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv)
+{
+       struct drm_mode_get_plane_res *plane_resp = data;
+       struct drm_mode_config *config;
+       struct drm_plane *plane;
+       uint32_t __user *plane_ptr;
+       int copied = 0, ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+       config = &dev->mode_config;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and the 2nd time to fill it.
+        */
+       if (config->num_plane &&
+           (plane_resp->count_planes >= config->num_plane)) {
+               plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+               list_for_each_entry(plane, &config->plane_list, head) {
+                       if (put_user(plane->base.id, plane_ptr + copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       plane_resp->count_planes = config->num_plane;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
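
The "called twice" comment above describes the usual DRM read-back protocol. A hedged userspace sketch of that two-pass pattern, assuming libdrm's drmIoctl() wrapper and the DRM_IOCTL_MODE_GETPLANERESOURCES number wired up in drm_drv.c later in this diff (header locations vary by distribution; error handling is trimmed):

        #include <stdint.h>
        #include <stdlib.h>
        #include <xf86drm.h>    /* drmIoctl() */
        #include <drm.h>        /* struct drm_mode_get_plane_res, ioctl numbers */

        static uint32_t *get_plane_ids(int fd, uint32_t *count)
        {
                struct drm_mode_get_plane_res res = { 0 };
                uint32_t *ids;

                /* Pass 1: count_planes == 0, the kernel only reports how many exist. */
                if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res))
                        return NULL;

                ids = calloc(res.count_planes, sizeof(*ids));
                if (!ids)
                        return NULL;

                /* Pass 2: hand in a buffer at least count_planes entries long. */
                res.plane_id_ptr = (uint64_t)(uintptr_t)ids;
                if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res)) {
                        free(ids);
                        return NULL;
                }

                *count = res.count_planes;
                return ids;
        }
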
+
+/**
+ * drm_mode_getplane - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return plane info, including formats supported, gamma size, any
+ * current fb, etc.
+ */
+int drm_mode_getplane(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_mode_get_plane *plane_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_plane *plane;
+       uint32_t __user *format_ptr;
+       int ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, plane_resp->plane_id,
+                                  DRM_MODE_OBJECT_PLANE);
+       if (!obj) {
+               ret = -ENOENT;
+               goto out;
+       }
+       plane = obj_to_plane(obj);
+
+       if (plane->crtc)
+               plane_resp->crtc_id = plane->crtc->base.id;
+       else
+               plane_resp->crtc_id = 0;
+
+       if (plane->fb)
+               plane_resp->fb_id = plane->fb->base.id;
+       else
+               plane_resp->fb_id = 0;
+
+       plane_resp->plane_id = plane->base.id;
+       plane_resp->possible_crtcs = plane->possible_crtcs;
+       plane_resp->gamma_size = plane->gamma_size;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and the 2nd time to fill it.
+        */
+       if (plane->format_count &&
+           (plane_resp->count_format_types >= plane->format_count)) {
+               format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+               if (copy_to_user(format_ptr,
+                                plane->format_types,
+                                sizeof(uint32_t) * plane->format_count)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+       }
+       plane_resp->count_format_types = plane->format_count;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_setplane - set up or tear down a plane
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set plane info, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_mode_set_plane *plane_req = data;
+       struct drm_mode_object *obj;
+       struct drm_plane *plane;
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+       unsigned int fb_width, fb_height;
+       int i;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       /*
+        * First, find the plane, crtc, and fb objects.  If not available,
+        * we don't bother to call the driver.
+        */
+       obj = drm_mode_object_find(dev, plane_req->plane_id,
+                                  DRM_MODE_OBJECT_PLANE);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown plane ID %d\n",
+                             plane_req->plane_id);
+               ret = -ENOENT;
+               goto out;
+       }
+       plane = obj_to_plane(obj);
+
+       /* No fb means shut it down */
+       if (!plane_req->fb_id) {
+               plane->funcs->disable_plane(plane);
+               plane->crtc = NULL;
+               plane->fb = NULL;
+               goto out;
+       }
+
+       obj = drm_mode_object_find(dev, plane_req->crtc_id,
+                                  DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+                             plane_req->crtc_id);
+               ret = -ENOENT;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       obj = drm_mode_object_find(dev, plane_req->fb_id,
+                                  DRM_MODE_OBJECT_FB);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+                             plane_req->fb_id);
+               ret = -ENOENT;
+               goto out;
+       }
+       fb = obj_to_fb(obj);
+
+       /* Check whether this plane supports the fb pixel format. */
+       for (i = 0; i < plane->format_count; i++)
+               if (fb->pixel_format == plane->format_types[i])
+                       break;
+       if (i == plane->format_count) {
+               DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       fb_width = fb->width << 16;
+       fb_height = fb->height << 16;
+
+       /* Make sure source coordinates are inside the fb. */
+       if (plane_req->src_w > fb_width ||
+           plane_req->src_x > fb_width - plane_req->src_w ||
+           plane_req->src_h > fb_height ||
+           plane_req->src_y > fb_height - plane_req->src_h) {
+               DRM_DEBUG_KMS("Invalid source coordinates "
+                             "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+                             plane_req->src_w >> 16,
+                             ((plane_req->src_w & 0xffff) * 15625) >> 10,
+                             plane_req->src_h >> 16,
+                             ((plane_req->src_h & 0xffff) * 15625) >> 10,
+                             plane_req->src_x >> 16,
+                             ((plane_req->src_x & 0xffff) * 15625) >> 10,
+                             plane_req->src_y >> 16,
+                             ((plane_req->src_y & 0xffff) * 15625) >> 10);
+               ret = -ENOSPC;
+               goto out;
+       }
+
+       /* Give drivers some help against integer overflows */
+       if (plane_req->crtc_w > INT_MAX ||
+           plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+           plane_req->crtc_h > INT_MAX ||
+           plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+                             plane_req->crtc_w, plane_req->crtc_h,
+                             plane_req->crtc_x, plane_req->crtc_y);
+               ret = -ERANGE;
+               goto out;
+       }
+
+       ret = plane->funcs->update_plane(plane, crtc, fb,
+                                        plane_req->crtc_x, plane_req->crtc_y,
+                                        plane_req->crtc_w, plane_req->crtc_h,
+                                        plane_req->src_x, plane_req->src_y,
+                                        plane_req->src_w, plane_req->src_h);
+       if (!ret) {
+               plane->crtc = crtc;
+               plane->fb = fb;
+       }
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+
+       return ret;
+}
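
The source coordinates checked above are 16.16 fixed point (hence fb_width = fb->width << 16, and the * 15625 >> 10 in the debug message, which converts the 16-bit fraction exactly to millionths since 15625/1024 = 1000000/65536), while the CRTC coordinates are whole pixels. A hypothetical userspace call that scans out a full 1920x1080 framebuffer through the new ioctl; the IDs are placeholders and error handling is omitted:

        static int show_fullscreen_plane(int fd, uint32_t plane_id,
                                         uint32_t crtc_id, uint32_t fb_id)
        {
                struct drm_mode_set_plane req = { 0 };

                req.plane_id = plane_id;   /* from GETPLANERESOURCES/GETPLANE */
                req.crtc_id  = crtc_id;
                req.fb_id    = fb_id;      /* fb_id == 0 would disable the plane */

                /* Destination rectangle on the CRTC, in pixels. */
                req.crtc_x = 0;
                req.crtc_y = 0;
                req.crtc_w = 1920;
                req.crtc_h = 1080;

                /* Source rectangle inside the fb, in 16.16 fixed point. */
                req.src_x = 0;
                req.src_y = 0;
                req.src_w = 1920 << 16;
                req.src_h = 1080 << 16;

                return drmIoctl(fd, DRM_IOCTL_MODE_SETPLANE, &req);
        }
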
+
+/**
  * drm_mode_setcrtc - set CRTC configuration
  * @inode: inode from the ioctl
  * @filp: file * from the ioctl
@@ -1576,7 +1892,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
                }
 
                for (i = 0; i < crtc_req->count_connectors; i++) {
-                       set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+                       set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
                        if (get_user(out_id, &set_connectors_ptr[i])) {
                                ret = -EFAULT;
                                goto out;
@@ -1625,10 +1941,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       if (!req->flags) {
-               DRM_ERROR("no operation set\n");
+       if (!req->flags)
                return -EINVAL;
-       }
 
        mutex_lock(&dev->mode_config.mutex);
        obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
@@ -1641,7 +1955,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
 
        if (req->flags & DRM_MODE_CURSOR_BO) {
                if (!crtc->funcs->cursor_set) {
-                       DRM_ERROR("crtc does not support cursor\n");
                        ret = -ENXIO;
                        goto out;
                }
@@ -1654,7 +1967,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
                if (crtc->funcs->cursor_move) {
                        ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
                } else {
-                       DRM_ERROR("crtc does not support cursor\n");
                        ret = -EFAULT;
                        goto out;
                }
@@ -1664,6 +1976,42 @@ out:
        return ret;
 }
 
+/* Original addfb only supported RGB formats, so figure out which one */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+       uint32_t fmt;
+
+       switch (bpp) {
+       case 8:
+               fmt = DRM_FORMAT_RGB332;
+               break;
+       case 16:
+               if (depth == 15)
+                       fmt = DRM_FORMAT_XRGB1555;
+               else
+                       fmt = DRM_FORMAT_RGB565;
+               break;
+       case 24:
+               fmt = DRM_FORMAT_RGB888;
+               break;
+       case 32:
+               if (depth == 24)
+                       fmt = DRM_FORMAT_XRGB8888;
+               else if (depth == 30)
+                       fmt = DRM_FORMAT_XRGB2101010;
+               else
+                       fmt = DRM_FORMAT_ARGB8888;
+               break;
+       default:
+               DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+               fmt = DRM_FORMAT_XRGB8888;
+               break;
+       }
+
+       return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
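
A few sample mappings produced by this helper, which drm_mode_addfb() below uses to translate legacy depth/bpp requests into FOURCC codes (illustrative values, not part of the patch):

        uint32_t a = drm_mode_legacy_fb_format(32, 24); /* DRM_FORMAT_XRGB8888 */
        uint32_t b = drm_mode_legacy_fb_format(32, 30); /* DRM_FORMAT_XRGB2101010 */
        uint32_t c = drm_mode_legacy_fb_format(32, 32); /* DRM_FORMAT_ARGB8888 */
        uint32_t d = drm_mode_legacy_fb_format(16, 15); /* DRM_FORMAT_XRGB1555 */
        uint32_t e = drm_mode_legacy_fb_format(16, 16); /* DRM_FORMAT_RGB565 */
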
+
 /**
  * drm_mode_addfb - add an FB to the graphics configuration
  * @inode: inode from the ioctl
@@ -1684,7 +2032,140 @@ out:
 int drm_mode_addfb(struct drm_device *dev,
                   void *data, struct drm_file *file_priv)
 {
-       struct drm_mode_fb_cmd *r = data;
+       struct drm_mode_fb_cmd *or = data;
+       struct drm_mode_fb_cmd2 r = {};
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+
+       /* Use new struct with format internally */
+       r.fb_id = or->fb_id;
+       r.width = or->width;
+       r.height = or->height;
+       r.pitches[0] = or->pitch;
+       r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+       r.handles[0] = or->handle;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       if ((config->min_width > r.width) || (r.width > config->max_width))
+               return -EINVAL;
+
+       if ((config->min_height > r.height) || (r.height > config->max_height))
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       /* TODO check buffer is sufficiently large */
+       /* TODO setup destructor callback */
+
+       fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
+       if (IS_ERR(fb)) {
+               DRM_ERROR("could not create framebuffer\n");
+               ret = PTR_ERR(fb);
+               goto out;
+       }
+
+       or->fb_id = fb->base.id;
+       list_add(&fb->filp_head, &file_priv->fbs);
+       DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+static int format_check(struct drm_mode_fb_cmd2 *r)
+{
+       uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+
+       switch (format) {
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_BGR233:
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_XBGR4444:
+       case DRM_FORMAT_RGBX4444:
+       case DRM_FORMAT_BGRX4444:
+       case DRM_FORMAT_ARGB4444:
+       case DRM_FORMAT_ABGR4444:
+       case DRM_FORMAT_RGBA4444:
+       case DRM_FORMAT_BGRA4444:
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XBGR1555:
+       case DRM_FORMAT_RGBX5551:
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_ARGB1555:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_BGRA5551:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_RGBX1010102:
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_AYUV:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request with format.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+                   void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd2 *r = data;
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_framebuffer *fb;
        int ret = 0;
@@ -1693,18 +2174,23 @@ int drm_mode_addfb(struct drm_device *dev,
                return -EINVAL;
 
        if ((config->min_width > r->width) || (r->width > config->max_width)) {
-               DRM_ERROR("mode new framebuffer width not within limits\n");
+               DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
+                         r->width, config->min_width, config->max_width);
                return -EINVAL;
        }
        if ((config->min_height > r->height) || (r->height > config->max_height)) {
-               DRM_ERROR("mode new framebuffer height not within limits\n");
+               DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
+                         r->height, config->min_height, config->max_height);
                return -EINVAL;
        }
 
-       mutex_lock(&dev->mode_config.mutex);
+       ret = format_check(r);
+       if (ret) {
+               DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
+               return ret;
+       }
 
-       /* TODO check buffer is sufficiently large */
-       /* TODO setup destructor callback */
+       mutex_lock(&dev->mode_config.mutex);
 
        fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
        if (IS_ERR(fb)) {
@@ -1756,7 +2242,6 @@ int drm_mode_rmfb(struct drm_device *dev,
        obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
        /* TODO check that we really get a framebuffer back. */
        if (!obj) {
-               DRM_ERROR("mode invalid framebuffer id\n");
                ret = -EINVAL;
                goto out;
        }
@@ -1767,7 +2252,6 @@ int drm_mode_rmfb(struct drm_device *dev,
                        found = 1;
 
        if (!found) {
-               DRM_ERROR("tried to remove a fb that we didn't own\n");
                ret = -EINVAL;
                goto out;
        }
@@ -1814,7 +2298,6 @@ int drm_mode_getfb(struct drm_device *dev,
        mutex_lock(&dev->mode_config.mutex);
        obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
        if (!obj) {
-               DRM_ERROR("invalid framebuffer id\n");
                ret = -EINVAL;
                goto out;
        }
@@ -1824,7 +2307,7 @@ int drm_mode_getfb(struct drm_device *dev,
        r->width = fb->width;
        r->depth = fb->depth;
        r->bpp = fb->bits_per_pixel;
-       r->pitch = fb->pitch;
+       r->pitch = fb->pitches[0];
        fb->funcs->create_handle(fb, file_priv, &r->handle);
 
 out:
@@ -1850,14 +2333,13 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
        mutex_lock(&dev->mode_config.mutex);
        obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
        if (!obj) {
-               DRM_ERROR("invalid framebuffer id\n");
                ret = -EINVAL;
                goto out_err1;
        }
        fb = obj_to_fb(obj);
 
        num_clips = r->num_clips;
-       clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+       clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
 
        if (!num_clips != !clips_ptr) {
                ret = -EINVAL;
@@ -2253,7 +2735,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
        struct drm_property_enum *prop_enum;
        struct drm_mode_property_enum __user *enum_ptr;
        struct drm_property_blob *prop_blob;
-       uint32_t *blob_id_ptr;
+       uint32_t __user *blob_id_ptr;
        uint64_t __user *values_ptr;
        uint32_t __user *blob_length_ptr;
 
@@ -2283,7 +2765,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
        out_resp->flags = property->flags;
 
        if ((out_resp->count_values >= value_count) && value_count) {
-               values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
+               values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
                for (i = 0; i < value_count; i++) {
                        if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
                                ret = -EFAULT;
@@ -2296,7 +2778,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
        if (property->flags & DRM_MODE_PROP_ENUM) {
                if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
                        copied = 0;
-                       enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
+                       enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
                        list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
 
                                if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
@@ -2318,8 +2800,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
        if (property->flags & DRM_MODE_PROP_BLOB) {
                if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
                        copied = 0;
-                       blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
-                       blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
+                       blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
+                       blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
 
                        list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
                                if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
@@ -2380,7 +2862,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
        struct drm_mode_get_blob *out_resp = data;
        struct drm_property_blob *blob;
        int ret = 0;
-       void *blob_ptr;
+       void __user *blob_ptr;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
@@ -2394,7 +2876,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
        blob = obj_to_blob(obj);
 
        if (out_resp->length == blob->length) {
-               blob_ptr = (void *)(unsigned long)out_resp->data;
+               blob_ptr = (void __user *)(unsigned long)out_resp->data;
                if (copy_to_user(blob_ptr, blob->data, blob->length)){
                        ret = -EFAULT;
                        goto done;
@@ -2788,3 +3270,71 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
 
        return dev->driver->dumb_destroy(file_priv, dev, args->handle);
 }
+
+/*
+ * Just need to support RGB formats here for compat with code that doesn't
+ * use pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+                         int *bpp)
+{
+       switch (format) {
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_BGR233:
+               *depth = 8;
+               *bpp = 8;
+               break;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XBGR1555:
+       case DRM_FORMAT_RGBX5551:
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_ARGB1555:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_BGRA5551:
+               *depth = 15;
+               *bpp = 16;
+               break;
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+               *depth = 16;
+               *bpp = 16;
+               break;
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+               *depth = 24;
+               *bpp = 24;
+               break;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+               *depth = 24;
+               *bpp = 32;
+               break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_RGBX1010102:
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+               *depth = 30;
+               *bpp = 32;
+               break;
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+               *depth = 32;
+               *bpp = 32;
+               break;
+       default:
+               DRM_DEBUG_KMS("unsupported pixel format\n");
+               *depth = 0;
+               *bpp = 0;
+               break;
+       }
+}
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
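
This is the rough inverse of drm_mode_legacy_fb_format() above, again limited to the RGB formats that legacy userspace can describe; for example (illustrative, not part of the patch):

        unsigned int depth;
        int bpp;

        drm_fb_get_bpp_depth(DRM_FORMAT_XRGB8888, &depth, &bpp); /* depth = 24, bpp = 32 */
        drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp); /* depth = 32, bpp = 32 */
        drm_fb_get_bpp_depth(DRM_FORMAT_NV12, &depth, &bpp);     /* unhandled: depth = 0, bpp = 0 */
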
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index d2619d7..84a4a80 100644
@@ -34,6 +34,7 @@
 
 #include "drmP.h"
 #include "drm_crtc.h"
+#include "drm_fourcc.h"
 #include "drm_crtc_helper.h"
 #include "drm_fb_helper.h"
 
@@ -710,7 +711,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                        for (i = 0; i < set->num_connectors; i++) {
                                DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
                                              drm_get_connector_name(set->connectors[i]));
-                               set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+                               set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
                        }
                }
                drm_helper_disable_unused_functions(dev);
@@ -847,13 +848,19 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 EXPORT_SYMBOL(drm_helper_connector_dpms);
 
 int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-                                  struct drm_mode_fb_cmd *mode_cmd)
+                                  struct drm_mode_fb_cmd2 *mode_cmd)
 {
+       int i;
+
        fb->width = mode_cmd->width;
        fb->height = mode_cmd->height;
-       fb->pitch = mode_cmd->pitch;
-       fb->bits_per_pixel = mode_cmd->bpp;
-       fb->depth = mode_cmd->depth;
+       for (i = 0; i < 4; i++) {
+               fb->pitches[i] = mode_cmd->pitches[i];
+               fb->offsets[i] = mode_cmd->offsets[i];
+       }
+       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+                                   &fb->bits_per_pixel);
+       fb->pixel_format = mode_cmd->pixel_format;
 
        return 0;
 }
@@ -1008,3 +1015,36 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
                queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return 3;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+               return 2;
+       default:
+               return 1;
+       }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
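
For example, the helper distinguishes packed, bi-planar and fully planar layouts (illustrative, not part of the patch):

        int n_rgb  = drm_format_num_planes(DRM_FORMAT_XRGB8888); /* 1: packed */
        int n_nv12 = drm_format_num_planes(DRM_FORMAT_NV12);     /* 2: Y plane + interleaved CbCr */
        int n_i420 = drm_format_num_planes(DRM_FORMAT_YUV420);   /* 3: separate Y, Cb and Cr planes */
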
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 40c187c..ebf7d3f 100644
@@ -61,14 +61,14 @@ static int drm_version(struct drm_device *dev, void *data,
 
 /** Ioctl table */
 static struct drm_ioctl_desc drm_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
-       DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
-       DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
-       DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
        DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -136,8 +136,11 @@ static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
@@ -150,6 +153,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3e927ce..ece03fc 100644
@@ -508,25 +508,10 @@ static void
 cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
 {
        int i, n = 0;
-       u8 rev = ext[0x01], d = ext[0x02];
+       u8 d = ext[0x02];
        u8 *det_base = ext + d;
 
-       switch (rev) {
-       case 0:
-               /* can't happen */
-               return;
-       case 1:
-               /* have to infer how many blocks we have, check pixel clock */
-               for (i = 0; i < 6; i++)
-                       if (det_base[18*i] || det_base[18*i+1])
-                               n++;
-               break;
-       default:
-               /* explicit count */
-               n = min(ext[0x03] & 0x0f, 6);
-               break;
-       }
-
+       n = (127 - d) / 18;
        for (i = 0; i < n; i++)
                cb((struct detailed_timing *)(det_base + 18 * i), closure);
 }
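
The new bound relies on the fixed layout of an EDID/CEA extension block: the block is 128 bytes, byte 0x02 holds the offset d of the first detailed timing descriptor, each descriptor is 18 bytes, and byte 127 is the checksum, so at most (127 - d) / 18 descriptors can fit. A worked example (not part of the patch):

        u8 d = 0x20;            /* first descriptor at byte 32 */
        int n = (127 - d) / 18; /* = 5; the minimum offset d = 4 gives 6,
                                 * matching the cap of 6 in the code removed above */
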
@@ -1319,6 +1304,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 
 #define HDMI_IDENTIFIER 0x000C03
 #define AUDIO_BLOCK    0x01
+#define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK  0x04
 #define EDID_BASIC_AUDIO       (1 << 6)
@@ -1349,6 +1335,47 @@ u8 *drm_find_cea_extension(struct edid *edid)
 }
 EXPORT_SYMBOL(drm_find_cea_extension);
 
+static int
+do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
+{
+       struct drm_device *dev = connector->dev;
+       u8 * mode, cea_mode;
+       int modes = 0;
+
+       for (mode = db; mode < db + len; mode++) {
+               cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
+               if (cea_mode < drm_num_cea_modes) {
+                       struct drm_display_mode *newmode;
+                       newmode = drm_mode_duplicate(dev,
+                                                    &edid_cea_modes[cea_mode]);
+                       if (newmode) {
+                               drm_mode_probed_add(connector, newmode);
+                               modes++;
+                       }
+               }
+       }
+
+       return modes;
+}
+
+static int
+add_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+       u8 * cea = drm_find_cea_extension(edid);
+       u8 * db, dbl;
+       int modes = 0;
+
+       if (cea && cea[1] >= 3) {
+               for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+                       dbl = db[0] & 0x1f;
+                       if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK)
+                               modes += do_cea_modes (connector, db+1, dbl);
+               }
+       }
+
+       return modes;
+}
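
Each CEA data block starts with a header byte whose top three bits are the tag and whose low five bits are the payload length, which is what the (db[0] & 0xe0) >> 5 and db[0] & 0x1f expressions above decode. For instance (illustrative, not part of the patch):

        u8 hdr = 0x42;              /* binary 010 00010 */
        u8 tag = (hdr & 0xe0) >> 5; /* 2 == VIDEO_BLOCK */
        u8 len = hdr & 0x1f;        /* 2 one-byte short video descriptors follow */
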
+
 static void
 parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
 {
@@ -1432,26 +1459,29 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
        eld[18] = edid->prod_code[0];
        eld[19] = edid->prod_code[1];
 
-       for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
-               dbl = db[0] & 0x1f;
-
-               switch ((db[0] & 0xe0) >> 5) {
-               case AUDIO_BLOCK:       /* Audio Data Block, contains SADs */
-                       sad_count = dbl / 3;
-                       memcpy(eld + 20 + mnl, &db[1], dbl);
-                       break;
-               case SPEAKER_BLOCK:     /* Speaker Allocation Data Block */
-                       eld[7] = db[1];
-                       break;
-               case VENDOR_BLOCK:
-                       /* HDMI Vendor-Specific Data Block */
-                       if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
-                               parse_hdmi_vsdb(connector, db);
-                       break;
-               default:
-                       break;
+       if (cea[1] >= 3)
+               for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+                       dbl = db[0] & 0x1f;
+
+                       switch ((db[0] & 0xe0) >> 5) {
+                       case AUDIO_BLOCK:
+                               /* Audio Data Block, contains SADs */
+                               sad_count = dbl / 3;
+                               memcpy(eld + 20 + mnl, &db[1], dbl);
+                               break;
+                       case SPEAKER_BLOCK:
+                                /* Speaker Allocation Data Block */
+                               eld[7] = db[1];
+                               break;
+                       case VENDOR_BLOCK:
+                               /* HDMI Vendor-Specific Data Block */
+                               if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+                                       parse_hdmi_vsdb(connector, db);
+                               break;
+                       default:
+                               break;
+                       }
                }
-       }
        eld[5] |= sad_count << 4;
        eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
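
A worked example of the size bookkeeping at the end of this hunk: eld[5] gets the SAD count packed into its upper nibble, and eld[2] appears to be the baseline ELD length in 4-byte units, with the trailing + 3 providing the round-up. With an 8-byte monitor name (mnl = 8) and one short audio descriptor (sad_count = 1) it evaluates to (20 + 8 + 3 + 3) / 4 = 8.
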
 
@@ -1722,6 +1752,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
        num_modes += add_standard_modes(connector, edid);
        num_modes += add_established_modes(connector, edid);
        num_modes += add_inferred_modes(connector, edid);
+       num_modes += add_cea_modes(connector, edid);
 
        if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
                edid_fixup_preferred(connector, quirks);
index 5f20644..a91ffb1 100644
@@ -378,3 +378,287 @@ static const struct {
        { 1920, 1440, 75, 0 },
 };
 static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+       /* 640x480@60Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+                  752, 800, 0, 480, 490, 492, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x480@60Hz */
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+                  798, 858, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x480@60Hz */
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+                  798, 858, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1280x720@60Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+                  1430, 1650, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080i@60Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+                  2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x480i@60Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+                  1602, 1716, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x480i@60Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+                  1602, 1716, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x240@60Hz */
+       { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+                  1602, 1716, 0, 240, 244, 247, 262, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x240@60Hz */
+       { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+                  1602, 1716, 0, 240, 244, 247, 262, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x480i@60Hz */
+       { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+                  3204, 3432, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 2880x480i@60Hz */
+       { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+                  3204, 3432, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 2880x240@60Hz */
+       { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+                  3204, 3432, 0, 240, 244, 247, 262, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x240@60Hz */
+       { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+                  3204, 3432, 0, 240, 244, 247, 262, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x480@60Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+                  1596, 1716, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x480@60Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+                  1596, 1716, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1920x1080@60Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+                  2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 720x576@50Hz */
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+                  796, 864, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x576@50Hz */
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+                  796, 864, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1280x720@50Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+                  1760, 1980, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080i@50Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+                  2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x576i@50Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+                  1590, 1728, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x576i@50Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+                  1590, 1728, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x288@50Hz */
+       { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+                  1590, 1728, 0, 288, 290, 293, 312, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x288@50Hz */
+       { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+                  1590, 1728, 0, 288, 290, 293, 312, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x576i@50Hz */
+       { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+                  3180, 3456, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 2880x576i@50Hz */
+       { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+                  3180, 3456, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 2880x288@50Hz */
+       { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+                  3180, 3456, 0, 288, 290, 293, 312, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x288@50Hz */
+       { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+                  3180, 3456, 0, 288, 290, 293, 312, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x576@50Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+                  1592, 1728, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x576@50Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+                  1592, 1728, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1920x1080@50Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+                  2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080@24Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+                  2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080@25Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+                  2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080@30Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+                  2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 2880x480@60Hz */
+       { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+                  3192, 3432, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x480@60Hz */
+       { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+                  3192, 3432, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x576@50Hz */
+       { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+                  3184, 3456, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 2880x576@50Hz */
+       { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+                  3184, 3456, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1920x1080i@50Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+                  2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1920x1080i@100Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+                  2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1280x720@100Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+                  1760, 1980, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 720x576@100Hz */
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+                  796, 864, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x576@100Hz */
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+                  796, 864, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x576i@100Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+                  1590, 1728, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x576i@100Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+                  1590, 1728, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1920x1080i@120Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+                  2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1280x720@120Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+                  1430, 1650, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 720x480@120Hz */
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+                  798, 858, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x480@120Hz */
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+                  798, 858, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x480i@120Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+                  1602, 1716, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x480i@120Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+                  1602, 1716, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 720x576@200Hz */
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+                  796, 864, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x576@200Hz */
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+                  796, 864, 0, 576, 581, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x576i@200Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+                  1590, 1728, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x576i@200Hz */
+       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+                  1590, 1728, 0, 576, 580, 586, 625, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 720x480@240Hz */
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+                  798, 858, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 720x480@240Hz */
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+                  798, 858, 0, 480, 489, 495, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+       /* 1440x480i@240Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+                  1602, 1716, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1440x480i@240Hz */
+       { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+                  1602, 1716, 0, 480, 488, 494, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                       DRM_MODE_FLAG_INTERLACE) },
+       /* 1280x720@24Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+                  3080, 3300, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x720@25Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+                  3740, 3960, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1280x720@30Hz */
+       { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+                  3080, 3300, 0, 720, 725, 730, 750, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080@120Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+                  2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       /* 1920x1080@100Hz */
+       { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+                  2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_cea_modes =
+       sizeof (edid_cea_modes) / sizeof (edid_cea_modes[0]);
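
Tying this table back to do_cea_modes() earlier in the patch: the entries are laid out in VIC order, so a short video descriptor carrying VIC 4 picks edid_cea_modes[3], the 1280x720@60Hz entry above, while any VIC with (vic - 1) >= drm_num_cea_modes is simply skipped.
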
index 80fe39d..aada26f 100644
@@ -255,6 +255,13 @@ bool drm_fb_helper_force_kernel_mode(void)
 int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
                        void *panic_str)
 {
+       /*
+        * It's a waste of time and effort to switch back to text console
+        * if the kernel should reboot before panic messages can be seen.
+        */
+       if (panic_timeout < 0)
+               return 0;
+
        printk(KERN_ERR "panic occurred, switching back to text console\n");
        return drm_fb_helper_force_kernel_mode();
 }
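
Some context for the new early return: panic_timeout is the kernel's panic= setting, and a negative value means the machine reboots immediately on panic, so a restored text console would never be seen. The hypothetical module below is not from this commit; it only reuses real kernel APIs (atomic_notifier_chain_register() and the exported panic_notifier_list) to show the same bail-out pattern in a plain panic notifier:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Hypothetical panic notifier illustrating the "skip the work when the
 * kernel is going to reboot immediately" check used above. */
static int example_panic_cb(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	if (panic_timeout < 0)	/* panic=-1: immediate reboot, nothing to show */
		return 0;

	pr_err("example: panic occurred: %s\n", (char *)data);
	return 0;
}

static struct notifier_block example_panic_nb = {
	.notifier_call = example_panic_cb,
};

static int __init example_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &example_panic_nb);
	return 0;
}

static void __exit example_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &example_panic_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
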
index 4911e1d..c00cf15 100644
@@ -182,7 +182,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
                goto out;
 
        old_fops = filp->f_op;
-       filp->f_op = fops_get(&dev->driver->fops);
+       filp->f_op = fops_get(dev->driver->fops);
        if (filp->f_op == NULL) {
                filp->f_op = old_fops;
                goto out;
index 904d7e9..956fd38 100644
@@ -158,14 +158,11 @@ int drm_getmap(struct drm_device *dev, void *data,
        int i;
 
        idx = map->offset;
-
-       mutex_lock(&dev->struct_mutex);
-       if (idx < 0) {
-               mutex_unlock(&dev->struct_mutex);
+       if (idx < 0)
                return -EINVAL;
-       }
 
        i = 0;
+       mutex_lock(&dev->struct_mutex);
        list_for_each(list, &dev->maplist) {
                if (i == idx) {
                        r_list = list_entry(list, struct drm_map_list, head);
@@ -211,9 +208,9 @@ int drm_getclient(struct drm_device *dev, void *data,
        int i;
 
        idx = client->idx;
-       mutex_lock(&dev->struct_mutex);
-
        i = 0;
+
+       mutex_lock(&dev->struct_mutex);
        list_for_each_entry(pt, &dev->filelist, lhead) {
                if (i++ >= idx) {
                        client->auth = pt->authenticated;
@@ -249,8 +246,6 @@ int drm_getstats(struct drm_device *dev, void *data,
 
        memset(stats, 0, sizeof(*stats));
 
-       mutex_lock(&dev->struct_mutex);
-
        for (i = 0; i < dev->counters; i++) {
                if (dev->types[i] == _DRM_STAT_LOCK)
                        stats->data[i].value =
@@ -262,8 +257,6 @@ int drm_getstats(struct drm_device *dev, void *data,
 
        stats->count = dev->counters;
 
-       mutex_unlock(&dev->struct_mutex);
-
        return 0;
 }
 
index 632ae24..c79c713 100644
@@ -33,6 +33,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/export.h>
 #include "drmP.h"
 
 static int drm_notifier(void *priv);
@@ -345,6 +346,7 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
        }
        spin_unlock_bh(&lock_data->spinlock);
 }
+EXPORT_SYMBOL(drm_idlelock_take);
 
 void drm_idlelock_release(struct drm_lock_data *lock_data)
 {
@@ -364,6 +366,7 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
        }
        spin_unlock_bh(&lock_data->spinlock);
 }
+EXPORT_SYMBOL(drm_idlelock_release);
 
 int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
 {
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
deleted file mode 100644
index cebce45..0000000
+++ /dev/null
@@ -1,351 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Simple memory manager interface that keeps track on allocate regions on a
- * per "owner" basis. All regions associated with an "owner" can be released
- * with a simple call. Typically if the "owner" exists. The owner is any
- * "unsigned long" identifier. Can typically be a pointer to a file private
- * struct or a context identifier.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/export.h>
-#include "drm_sman.h"
-
-struct drm_owner_item {
-       struct drm_hash_item owner_hash;
-       struct list_head sman_list;
-       struct list_head mem_blocks;
-};
-
-void drm_sman_takedown(struct drm_sman * sman)
-{
-       drm_ht_remove(&sman->user_hash_tab);
-       drm_ht_remove(&sman->owner_hash_tab);
-       kfree(sman->mm);
-}
-
-EXPORT_SYMBOL(drm_sman_takedown);
-
-int
-drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
-             unsigned int user_order, unsigned int owner_order)
-{
-       int ret = 0;
-
-       sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
-       if (!sman->mm) {
-               ret = -ENOMEM;
-               goto out;
-       }
-       sman->num_managers = num_managers;
-       INIT_LIST_HEAD(&sman->owner_items);
-       ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
-       if (ret)
-               goto out1;
-       ret = drm_ht_create(&sman->user_hash_tab, user_order);
-       if (!ret)
-               goto out;
-
-       drm_ht_remove(&sman->owner_hash_tab);
-out1:
-       kfree(sman->mm);
-out:
-       return ret;
-}
-
-EXPORT_SYMBOL(drm_sman_init);
-
-static void *drm_sman_mm_allocate(void *private, unsigned long size,
-                                 unsigned alignment)
-{
-       struct drm_mm *mm = (struct drm_mm *) private;
-       struct drm_mm_node *tmp;
-
-       tmp = drm_mm_search_free(mm, size, alignment, 1);
-       if (!tmp) {
-               return NULL;
-       }
-       tmp = drm_mm_get_block(tmp, size, alignment);
-       return tmp;
-}
-
-static void drm_sman_mm_free(void *private, void *ref)
-{
-       struct drm_mm_node *node = (struct drm_mm_node *) ref;
-
-       drm_mm_put_block(node);
-}
-
-static void drm_sman_mm_destroy(void *private)
-{
-       struct drm_mm *mm = (struct drm_mm *) private;
-       drm_mm_takedown(mm);
-       kfree(mm);
-}
-
-static unsigned long drm_sman_mm_offset(void *private, void *ref)
-{
-       struct drm_mm_node *node = (struct drm_mm_node *) ref;
-       return node->start;
-}
-
-int
-drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
-                  unsigned long start, unsigned long size)
-{
-       struct drm_sman_mm *sman_mm;
-       struct drm_mm *mm;
-       int ret;
-
-       BUG_ON(manager >= sman->num_managers);
-
-       sman_mm = &sman->mm[manager];
-       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-       if (!mm) {
-               return -ENOMEM;
-       }
-       sman_mm->private = mm;
-       ret = drm_mm_init(mm, start, size);
-
-       if (ret) {
-               kfree(mm);
-               return ret;
-       }
-
-       sman_mm->allocate = drm_sman_mm_allocate;
-       sman_mm->free = drm_sman_mm_free;
-       sman_mm->destroy = drm_sman_mm_destroy;
-       sman_mm->offset = drm_sman_mm_offset;
-
-       return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_set_range);
-
-int
-drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
-                    struct drm_sman_mm * allocator)
-{
-       BUG_ON(manager >= sman->num_managers);
-       sman->mm[manager] = *allocator;
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_sman_set_manager);
-
-static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
-                                                unsigned long owner)
-{
-       int ret;
-       struct drm_hash_item *owner_hash_item;
-       struct drm_owner_item *owner_item;
-
-       ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
-       if (!ret) {
-               return drm_hash_entry(owner_hash_item, struct drm_owner_item,
-                                     owner_hash);
-       }
-
-       owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
-       if (!owner_item)
-               goto out;
-
-       INIT_LIST_HEAD(&owner_item->mem_blocks);
-       owner_item->owner_hash.key = owner;
-       if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
-               goto out1;
-
-       list_add_tail(&owner_item->sman_list, &sman->owner_items);
-       return owner_item;
-
-out1:
-       kfree(owner_item);
-out:
-       return NULL;
-}
-
-struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
-                                   unsigned long size, unsigned alignment,
-                                   unsigned long owner)
-{
-       void *tmp;
-       struct drm_sman_mm *sman_mm;
-       struct drm_owner_item *owner_item;
-       struct drm_memblock_item *memblock;
-
-       BUG_ON(manager >= sman->num_managers);
-
-       sman_mm = &sman->mm[manager];
-       tmp = sman_mm->allocate(sman_mm->private, size, alignment);
-
-       if (!tmp) {
-               return NULL;
-       }
-
-       memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
-
-       if (!memblock)
-               goto out;
-
-       memblock->mm_info = tmp;
-       memblock->mm = sman_mm;
-       memblock->sman = sman;
-
-       if (drm_ht_just_insert_please
-           (&sman->user_hash_tab, &memblock->user_hash,
-            (unsigned long)memblock, 32, 0, 0))
-               goto out1;
-
-       owner_item = drm_sman_get_owner_item(sman, owner);
-       if (!owner_item)
-               goto out2;
-
-       list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
-
-       return memblock;
-
-out2:
-       drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
-out1:
-       kfree(memblock);
-out:
-       sman_mm->free(sman_mm->private, tmp);
-
-       return NULL;
-}
-
-EXPORT_SYMBOL(drm_sman_alloc);
-
-static void drm_sman_free(struct drm_memblock_item *item)
-{
-       struct drm_sman *sman = item->sman;
-
-       list_del(&item->owner_list);
-       drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
-       item->mm->free(item->mm->private, item->mm_info);
-       kfree(item);
-}
-
-int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
-{
-       struct drm_hash_item *hash_item;
-       struct drm_memblock_item *memblock_item;
-
-       if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
-               return -EINVAL;
-
-       memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
-                                      user_hash);
-       drm_sman_free(memblock_item);
-       return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_free_key);
-
-static void drm_sman_remove_owner(struct drm_sman *sman,
-                                 struct drm_owner_item *owner_item)
-{
-       list_del(&owner_item->sman_list);
-       drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
-       kfree(owner_item);
-}
-
-int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
-{
-
-       struct drm_hash_item *hash_item;
-       struct drm_owner_item *owner_item;
-
-       if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-               return -1;
-       }
-
-       owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
-       if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
-               drm_sman_remove_owner(sman, owner_item);
-               return -1;
-       }
-
-       return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_owner_clean);
-
-static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
-                                     struct drm_owner_item *owner_item)
-{
-       struct drm_memblock_item *entry, *next;
-
-       list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
-                                owner_list) {
-               drm_sman_free(entry);
-       }
-       drm_sman_remove_owner(sman, owner_item);
-}
-
-void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
-{
-
-       struct drm_hash_item *hash_item;
-       struct drm_owner_item *owner_item;
-
-       if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-
-               return;
-       }
-
-       owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
-       drm_sman_do_owner_cleanup(sman, owner_item);
-}
-
-EXPORT_SYMBOL(drm_sman_owner_cleanup);
-
-void drm_sman_cleanup(struct drm_sman *sman)
-{
-       struct drm_owner_item *entry, *next;
-       unsigned int i;
-       struct drm_sman_mm *sman_mm;
-
-       list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
-               drm_sman_do_owner_cleanup(sman, entry);
-       }
-       if (sman->mm) {
-               for (i = 0; i < sman->num_managers; ++i) {
-                       sman_mm = &sman->mm[i];
-                       if (sman_mm->private) {
-                               sman_mm->destroy(sman_mm->private);
-                               sman_mm->private = NULL;
-                       }
-               }
-       }
-}
-
-EXPORT_SYMBOL(drm_sman_cleanup);
index 847466a..f9aaa56 100644
@@ -18,3 +18,10 @@ config DRM_EXYNOS_FIMD
        help
          Choose this option if you want to use Exynos FIMD for DRM.
          If M is selected, the module will be called exynos_drm_fimd
+
+config DRM_EXYNOS_HDMI
+       tristate "Exynos DRM HDMI"
+       depends on DRM_EXYNOS
+       help
+         Choose this option if you want to use Exynos HDMI for DRM.
+         If M is selected, the module will be called exynos_drm_hdmi
index 0496d3f..395e69c 100644
@@ -5,7 +5,10 @@
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
 exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
                exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
-               exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o
+               exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
+               exynos_drm_plane.o
 
 obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
 obj-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
+obj-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o exynos_ddc.o \
+                                exynos_hdmiphy.o exynos_drm_hdmi.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
new file mode 100644
index 0000000..84b614f
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Seung-Woo Kim <sw0312.kim@samsung.com>
+ *     Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+static int s5p_ddc_probe(struct i2c_client *client,
+                       const struct i2c_device_id *dev_id)
+{
+       hdmi_attach_ddc_client(client);
+
+       dev_info(&client->adapter->dev, "attached s5p_ddc "
+               "into i2c adapter successfully\n");
+
+       return 0;
+}
+
+static int s5p_ddc_remove(struct i2c_client *client)
+{
+       dev_info(&client->adapter->dev, "detached s5p_ddc "
+               "from i2c adapter successfully\n");
+
+       return 0;
+}
+
+static struct i2c_device_id ddc_idtable[] = {
+       {"s5p_ddc", 0},
+       { },
+};
+
+struct i2c_driver ddc_driver = {
+       .driver = {
+               .name = "s5p_ddc",
+               .owner = THIS_MODULE,
+       },
+       .id_table       = ddc_idtable,
+       .probe          = s5p_ddc_probe,
+       .remove         = __devexit_p(s5p_ddc_remove),
+       .command                = NULL,
+};
+EXPORT_SYMBOL(ddc_driver);
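
This driver only does something once an i2c client named "s5p_ddc" is instantiated on whichever bus carries the HDMI DDC lines, and that part is not in this file. The fragment below is a hypothetical board-file sketch: the bus number 2 and the use of static board info (rather than device tree) are assumptions, while 0x50 is the conventional 7-bit DDC/EDID address.

#include <linux/i2c.h>
#include <linux/init.h>

/* Hypothetical board-file snippet: instantiate the "s5p_ddc" client so
 * that s5p_ddc_probe() above gets called. Bus number 2 is made up. */
static struct i2c_board_info hdmi_ddc_info __initdata = {
	I2C_BOARD_INFO("s5p_ddc", 0x50),
};

static int __init board_register_hdmi_ddc(void)
{
	/* must run before the adapter for bus 2 is registered */
	return i2c_register_board_info(2, &hdmi_ddc_info, 1);
}
arch_initcall(board_register_hdmi_ddc);
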
index 2bb07bc..3cf785c 100644
@@ -73,7 +73,7 @@ struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
-               return ERR_PTR(-ENOMEM);
+               return NULL;
        }
 
        buffer->size = size;
@@ -84,8 +84,7 @@ struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
         */
        if (lowlevel_buffer_allocate(dev, buffer) < 0) {
                kfree(buffer);
-               buffer = NULL;
-               return ERR_PTR(-ENOMEM);
+               return NULL;
        }
 
        return buffer;
index 6e91f9c..c913f2b 100644
@@ -30,9 +30,6 @@
 struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
                unsigned int size);
 
-/* get memory information of a drm framebuffer. */
-struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
-
 /* remove allocated physical memory. */
 void exynos_drm_buf_destroy(struct drm_device *dev,
                struct exynos_drm_gem_buf *buffer);
index ee43cc2..e3861ac 100644
@@ -34,7 +34,6 @@
 #include "exynos_drm_fb.h"
 #include "exynos_drm_encoder.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 
 #define to_exynos_crtc(x)      container_of(x, struct exynos_drm_crtc,\
                                drm_crtc)
  *     drm framework doesn't support multiple irq yet.
  *     we can refer to the crtc to current hardware interrupt occured through
  *     this pipe value.
+ * @dpms: store the crtc dpms value
  */
 struct exynos_drm_crtc {
        struct drm_crtc                 drm_crtc;
        struct exynos_drm_overlay       overlay;
        unsigned int                    pipe;
+       unsigned int                    dpms;
 };
 
 static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
@@ -78,19 +79,23 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
        struct exynos_drm_gem_buf *buffer;
        unsigned int actual_w;
        unsigned int actual_h;
+       int nr = exynos_drm_format_num_buffers(fb->pixel_format);
+       int i;
+
+       for (i = 0; i < nr; i++) {
+               buffer = exynos_drm_fb_buffer(fb, i);
+               if (!buffer) {
+                       DRM_LOG_KMS("buffer is null\n");
+                       return -EFAULT;
+               }
 
-       buffer = exynos_drm_fb_get_buf(fb);
-       if (!buffer) {
-               DRM_LOG_KMS("buffer is null.\n");
-               return -EFAULT;
-       }
-
-       overlay->dma_addr = buffer->dma_addr;
-       overlay->vaddr = buffer->kvaddr;
+               overlay->dma_addr[i] = buffer->dma_addr;
+               overlay->vaddr[i] = buffer->kvaddr;
 
-       DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
-                       (unsigned long)overlay->vaddr,
-                       (unsigned long)overlay->dma_addr);
+               DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
+                               i, (unsigned long)overlay->vaddr[i],
+                               (unsigned long)overlay->dma_addr[i]);
+       }
 
        actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
        actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
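
The loop above now fetches one buffer per plane of the framebuffer's fourcc format. The real mapping lives in exynos_drm_format_num_buffers(), which is not shown in this hunk, so the sketch below only illustrates the general idea with made-up logic over real drm_fourcc.h codes; the actual helper may cover a different set of formats.

#include <stdio.h>
#include <stdint.h>
#include <drm/drm_fourcc.h>	/* DRM_FORMAT_* codes from the kernel/libdrm headers */

/* Illustrative only: how many memory planes (and hence dma_addr[]/vaddr[]
 * slots) a few common fourccs need. */
static int num_buffers(uint32_t fourcc)
{
	switch (fourcc) {
	case DRM_FORMAT_NV12:		/* Y plane + interleaved CbCr plane */
		return 2;
	case DRM_FORMAT_YUV420:		/* separate Y, Cb and Cr planes */
		return 3;
	default:			/* packed RGB/YUV formats */
		return 1;
	}
}

int main(void)
{
	printf("NV12 needs %d buffer(s)\n", num_buffers(DRM_FORMAT_NV12));
	return 0;
}
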
@@ -101,7 +106,8 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
        overlay->fb_width = fb->width;
        overlay->fb_height = fb->height;
        overlay->bpp = fb->bits_per_pixel;
-       overlay->pitch = fb->pitch;
+       overlay->pitch = fb->pitches[0];
+       overlay->pixel_format = fb->pixel_format;
 
        /* set overlay range to be displayed. */
        overlay->crtc_x = pos->crtc_x;
@@ -153,26 +159,37 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
 
 static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
+       struct drm_device *dev = crtc->dev;
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
        DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
 
+       if (exynos_crtc->dpms == mode) {
+               DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+               return;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
        switch (mode) {
        case DRM_MODE_DPMS_ON:
-               exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
-                               exynos_drm_encoder_crtc_commit);
+               exynos_drm_fn_encoder(crtc, &mode,
+                               exynos_drm_encoder_crtc_dpms);
+               exynos_crtc->dpms = mode;
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               /* TODO */
-               exynos_drm_fn_encoder(crtc, NULL,
-                               exynos_drm_encoder_crtc_disable);
+               exynos_drm_fn_encoder(crtc, &mode,
+                               exynos_drm_encoder_crtc_dpms);
+               exynos_crtc->dpms = mode;
                break;
        default:
-               DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+               DRM_ERROR("unspecified mode %d\n", mode);
                break;
        }
+
+       mutex_unlock(&dev->struct_mutex);
 }
 
 static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -188,6 +205,28 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       /*
+        * When set_crtc is requested from user space or at boot time,
+        * crtc->commit may be called without a preceding dpms call, so if
+        * the hardware is not powered on yet, crtc->dpms should be called
+        * here with DRM_MODE_DPMS_ON to power it on.
+        */
+       if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) {
+               int mode = DRM_MODE_DPMS_ON;
+
+               /*
+                * power on the hardware for all encoders (e.g. hdmi)
+                * connected to the current crtc.
+                */
+               exynos_drm_crtc_dpms(crtc, mode);
+               /*
+                * enable dma for all encoders connected to the current
+                * crtc and for the lcd panel.
+                */
+               exynos_drm_fn_encoder(crtc, &mode,
+                                       exynos_drm_encoder_dpms_from_crtc);
+       }
+
        exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
                        exynos_drm_encoder_crtc_commit);
 }
@@ -344,6 +383,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
        }
 
        exynos_crtc->pipe = nr;
+       exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+       exynos_crtc->overlay.zpos = DEFAULT_ZPOS;
        crtc = &exynos_crtc->drm_crtc;
 
        private->crtc[nr] = crtc;
@@ -357,9 +398,14 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
 {
        struct exynos_drm_private *private = dev->dev_private;
+       struct exynos_drm_crtc *exynos_crtc =
+               to_exynos_crtc(private->crtc[crtc]);
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+               return -EPERM;
+
        exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
                        exynos_drm_enable_vblank);
 
@@ -369,9 +415,14 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
 {
        struct exynos_drm_private *private = dev->dev_private;
+       struct exynos_drm_crtc *exynos_crtc =
+               to_exynos_crtc(private->crtc[crtc]);
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+               return;
+
        exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
                        exynos_drm_disable_vblank);
 }
index 53e2216..35889ca 100644
 #include "exynos_drm_fbdev.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
 
-#define DRIVER_NAME    "exynos-drm"
+#define DRIVER_NAME    "exynos"
 #define DRIVER_DESC    "Samsung SoC DRM"
 #define DRIVER_DATE    "20110530"
 #define DRIVER_MAJOR   1
 #define DRIVER_MINOR   0
 
+#define VBLANK_OFF_DELAY       50000
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
        struct exynos_drm_private *private;
@@ -77,6 +80,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
                        goto err_crtc;
        }
 
+       for (nr = 0; nr < MAX_PLANE; nr++) {
+               ret = exynos_plane_init(dev, nr);
+               if (ret)
+                       goto err_crtc;
+       }
+
        ret = drm_vblank_init(dev, MAX_CRTC);
        if (ret)
                goto err_crtc;
@@ -100,6 +109,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
                goto err_drm_device;
        }
 
+       drm_vblank_offdelay = VBLANK_OFF_DELAY;
+
        return 0;
 
 err_drm_device:
@@ -163,6 +174,18 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
                        DRM_AUTH),
        DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
                        exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
+                       DRM_UNLOCKED | DRM_AUTH),
+};
+
+static const struct file_operations exynos_drm_driver_fops = {
+       .owner          = THIS_MODULE,
+       .open           = drm_open,
+       .mmap           = exynos_drm_gem_mmap,
+       .poll           = drm_poll,
+       .read           = drm_read,
+       .unlocked_ioctl = drm_ioctl,
+       .release        = drm_release,
 };
 
 static struct drm_driver exynos_drm_driver = {
@@ -182,15 +205,7 @@ static struct drm_driver exynos_drm_driver = {
        .dumb_map_offset        = exynos_drm_gem_dumb_map_offset,
        .dumb_destroy           = exynos_drm_gem_dumb_destroy,
        .ioctls                 = exynos_ioctls,
-       .fops = {
-               .owner          = THIS_MODULE,
-               .open           = drm_open,
-               .mmap           = exynos_drm_gem_mmap,
-               .poll           = drm_poll,
-               .read           = drm_read,
-               .unlocked_ioctl = drm_ioctl,
-               .release        = drm_release,
-       },
+       .fops                   = &exynos_drm_driver_fops,
        .name   = DRIVER_NAME,
        .desc   = DRIVER_DESC,
        .date   = DRIVER_DATE,
index 5e02e6e..e685e1e 100644
 #include "drm.h"
 
 #define MAX_CRTC       2
+#define MAX_PLANE      5
+#define MAX_FB_BUFFER  3
+#define DEFAULT_ZPOS   -1
 
 struct drm_device;
 struct exynos_drm_overlay;
 struct drm_connector;
 
+extern unsigned int drm_vblank_offdelay;
+
 /* this enumerates display type. */
 enum exynos_drm_output_type {
        EXYNOS_DISPLAY_TYPE_NONE,
@@ -57,8 +62,8 @@ enum exynos_drm_output_type {
 struct exynos_drm_overlay_ops {
        void (*mode_set)(struct device *subdrv_dev,
                         struct exynos_drm_overlay *overlay);
-       void (*commit)(struct device *subdrv_dev);
-       void (*disable)(struct device *subdrv_dev);
+       void (*commit)(struct device *subdrv_dev, int zpos);
+       void (*disable)(struct device *subdrv_dev, int zpos);
 };
 
 /*
@@ -80,9 +85,11 @@ struct exynos_drm_overlay_ops {
  * @scan_flag: interlace or progressive way.
  *     (it could be DRM_MODE_FLAG_*)
  * @bpp: pixel size.(in bit)
- * @dma_addr: bus(accessed by dma) address to the memory region allocated
- *     for a overlay.
- * @vaddr: virtual memory addresss to this overlay.
+ * @pixel_format: fourcc pixel format of this overlay
+ * @dma_addr: array of bus (dma) addresses of the memory regions
+ *           allocated for this overlay.
+ * @vaddr: array of virtual memory addresses of this overlay.
+ * @zpos: order of the overlay layer (z position).
  * @default_win: a window to be enabled.
  * @color_key: color key on or off.
  * @index_color: if using color key feature then this value would be used
@@ -109,8 +116,10 @@ struct exynos_drm_overlay {
        unsigned int scan_flag;
        unsigned int bpp;
        unsigned int pitch;
-       dma_addr_t dma_addr;
-       void __iomem *vaddr;
+       uint32_t pixel_format;
+       dma_addr_t dma_addr[MAX_FB_BUFFER];
+       void __iomem *vaddr[MAX_FB_BUFFER];
+       int zpos;
 
        bool default_win;
        bool color_key;
@@ -144,17 +153,19 @@ struct exynos_drm_display_ops {
 /*
  * Exynos drm manager ops
  *
+ * @dpms: control device power.
+ * @apply: set timing, vblank and overlay data to registers.
  * @mode_set: convert drm_display_mode to hw specific display mode and
  *           would be called by encoder->mode_set().
  * @commit: set current hw specific display mode to hw.
- * @disable: disable hardware specific display mode.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
  */
 struct exynos_drm_manager_ops {
+       void (*dpms)(struct device *subdrv_dev, int mode);
+       void (*apply)(struct device *subdrv_dev);
        void (*mode_set)(struct device *subdrv_dev, void *mode);
        void (*commit)(struct device *subdrv_dev);
-       void (*disable)(struct device *subdrv_dev);
        int (*enable_vblank)(struct device *subdrv_dev);
        void (*disable_vblank)(struct device *subdrv_dev);
 };
index 1530614..86b93dd 100644
  * @drm_encoder: encoder object.
  * @manager: specific encoder has its own manager to control a hardware
  *     appropriately and we can access a hardware drawing on this manager.
+ * @dpms: store the encoder dpms value.
  */
 struct exynos_drm_encoder {
        struct drm_encoder              drm_encoder;
        struct exynos_drm_manager       *manager;
+       int dpms;
 };
 
-static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void exynos_drm_display_power(struct drm_encoder *encoder, int mode)
 {
        struct drm_device *dev = encoder->dev;
        struct drm_connector *connector;
        struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (connector->encoder == encoder) {
+                       struct exynos_drm_display_ops *display_ops =
+                                                       manager->display_ops;
+
+                       DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+                                       connector->base.id, mode);
+                       if (display_ops && display_ops->power_on)
+                               display_ops->power_on(manager->dev, mode);
+               }
+       }
+}
+
+static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
        struct exynos_drm_manager_ops *manager_ops = manager->ops;
+       struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
 
        DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
 
+       if (exynos_encoder->dpms == mode) {
+               DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+               return;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
        switch (mode) {
        case DRM_MODE_DPMS_ON:
-               if (manager_ops && manager_ops->commit)
-                       manager_ops->commit(manager->dev);
+               if (manager_ops && manager_ops->apply)
+                       manager_ops->apply(manager->dev);
+               exynos_drm_display_power(encoder, mode);
+               exynos_encoder->dpms = mode;
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               /* TODO */
-               if (manager_ops && manager_ops->disable)
-                       manager_ops->disable(manager->dev);
+               exynos_drm_display_power(encoder, mode);
+               exynos_encoder->dpms = mode;
                break;
        default:
                DRM_ERROR("unspecified mode %d\n", mode);
                break;
        }
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       struct exynos_drm_display_ops *display_ops =
-                                                       manager->display_ops;
-
-                       DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
-                                       connector->base.id, mode);
-                       if (display_ops && display_ops->power_on)
-                               display_ops->power_on(manager->dev, mode);
-               }
-       }
+       mutex_unlock(&dev->struct_mutex);
 }
 
 static bool
@@ -169,7 +188,6 @@ static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
        exynos_encoder->manager->pipe = -1;
 
        drm_encoder_cleanup(encoder);
-       encoder->dev->mode_config.num_encoder--;
        kfree(exynos_encoder);
 }
 
@@ -199,6 +217,7 @@ exynos_drm_encoder_create(struct drm_device *dev,
                return NULL;
        }
 
+       exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
        exynos_encoder->manager = manager;
        encoder = &exynos_encoder->drm_encoder;
        encoder->possible_crtcs = possible_crtcs;
@@ -275,12 +294,27 @@ void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
                manager_ops->disable_vblank(manager->dev);
 }
 
-void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
+                                         void *data)
 {
        struct exynos_drm_manager *manager =
                to_exynos_encoder(encoder)->manager;
        struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+       int zpos = DEFAULT_ZPOS;
+
+       if (data)
+               zpos = *(int *)data;
+
+       if (overlay_ops && overlay_ops->commit)
+               overlay_ops->commit(manager->dev, zpos);
+}
+
+void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+{
+       struct exynos_drm_manager *manager =
+               to_exynos_encoder(encoder)->manager;
        int crtc = *(int *)data;
+       int zpos = DEFAULT_ZPOS;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -290,8 +324,53 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
         */
        manager->pipe = crtc;
 
-       if (overlay_ops && overlay_ops->commit)
-               overlay_ops->commit(manager->dev);
+       exynos_drm_encoder_crtc_plane_commit(encoder, &zpos);
+}
+
+void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data)
+{
+       struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+       int mode = *(int *)data;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       exynos_drm_encoder_dpms(encoder, mode);
+
+       exynos_encoder->dpms = mode;
+}
+
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
+{
+       struct drm_device *dev = encoder->dev;
+       struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+       struct exynos_drm_manager *manager = exynos_encoder->manager;
+       struct exynos_drm_manager_ops *manager_ops = manager->ops;
+       struct drm_connector *connector;
+       int mode = *(int *)data;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (manager_ops && manager_ops->dpms)
+               manager_ops->dpms(manager->dev, mode);
+
+       /*
+        * Propagate the current dpms mode to the connector attached to
+        * this encoder; connector->dpms is checked later by
+        * drm_helper_connector_dpms().
+        */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               if (connector->encoder == encoder)
+                       connector->dpms = mode;
+
+       /*
+        * If dpms goes off and no crtc is attached any more, the crtc has
+        * already been detached from this encoder and the detach path has
+        * completed, so clear the pipe in the manager to prevent repeated
+        * calls.
+        */
+       if (mode > DRM_MODE_DPMS_ON) {
+               if (!encoder->crtc)
+                       manager->pipe = -1;
+       }
 }
 
 void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -310,19 +389,15 @@ void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
        struct exynos_drm_manager *manager =
                to_exynos_encoder(encoder)->manager;
        struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+       int zpos = DEFAULT_ZPOS;
 
        DRM_DEBUG_KMS("\n");
 
-       if (overlay_ops && overlay_ops->disable)
-               overlay_ops->disable(manager->dev);
+       if (data)
+               zpos = *(int *)data;
 
-       /*
-        * crtc is already detached from encoder and last
-        * function for detaching is properly done, so
-        * clear pipe from manager to prevent repeated call
-        */
-       if (!encoder->crtc)
-               manager->pipe = -1;
+       if (overlay_ops && overlay_ops->disable)
+               overlay_ops->disable(manager->dev, zpos);
 }
 
 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
index a22acfb..97b087a 100644
@@ -39,7 +39,12 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
                            void (*fn)(struct drm_encoder *, void *));
 void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
 void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
+                                         void *data);
 void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder,
+                                       void *data);
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
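
For illustration only (not part of the patch; the wrapper names are hypothetical), the per-encoder callbacks declared above are meant to be driven through the exynos_drm_fn_encoder() iterator, with the payload handed back through the void *data argument:

    static void example_commit_encoders(struct drm_crtc *crtc, int pipe)
    {
            /* run exynos_drm_encoder_crtc_commit() on every encoder attached
             * to this crtc; the callback reads the pipe number out of *data */
            exynos_drm_fn_encoder(crtc, &pipe, exynos_drm_encoder_crtc_commit);
    }

    static void example_dpms_encoders(struct drm_crtc *crtc, int mode)
    {
            /* same pattern for the new crtc-driven dpms path */
            exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms);
    }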
 
index 5bf4a1a..3733fe6 100644 (file)
@@ -33,7 +33,6 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
-#include "exynos_drm_buf.h"
 #include "exynos_drm_gem.h"
 
 #define to_exynos_fb(x)        container_of(x, struct exynos_drm_fb, fb)
  * exynos specific framebuffer structure.
  *
  * @fb: drm framebuffer object.
- * @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @buffer: pointer to exynos_drm_gem_buffer object.
- *     - contain the memory information to memory region allocated
- *     at default framebuffer creation.
+ * @exynos_gem_obj: array of exynos specific gem objects, each wrapping a gem object.
  */
 struct exynos_drm_fb {
        struct drm_framebuffer          fb;
-       struct exynos_drm_gem_obj       *exynos_gem_obj;
-       struct exynos_drm_gem_buf       *buffer;
+       struct exynos_drm_gem_obj       *exynos_gem_obj[MAX_FB_BUFFER];
 };
 
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -61,13 +56,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 
        drm_framebuffer_cleanup(fb);
 
-       /*
-        * default framebuffer has no gem object so
-        * a buffer of the default framebuffer should be released at here.
-        */
-       if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
-               exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
-
        kfree(exynos_fb);
        exynos_fb = NULL;
 }
@@ -81,7 +69,7 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        return drm_gem_handle_create(file_priv,
-                       &exynos_fb->exynos_gem_obj->base, handle);
+                       &exynos_fb->exynos_gem_obj[0]->base, handle);
 }
 
 static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
@@ -102,134 +90,88 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
        .dirty          = exynos_drm_fb_dirty,
 };
 
-static struct drm_framebuffer *
-exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
-                   struct drm_mode_fb_cmd *mode_cmd)
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+                           struct drm_mode_fb_cmd2 *mode_cmd,
+                           struct drm_gem_object *obj)
 {
        struct exynos_drm_fb *exynos_fb;
-       struct drm_framebuffer *fb;
-       struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
-       struct drm_gem_object *obj;
-       unsigned int size;
        int ret;
 
-       DRM_DEBUG_KMS("%s\n", __FILE__);
-
-       mode_cmd->pitch = max(mode_cmd->pitch,
-                       mode_cmd->width * (mode_cmd->bpp >> 3));
-
-       DRM_LOG_KMS("drm fb create(%dx%d)\n",
-                       mode_cmd->width, mode_cmd->height);
-
        exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
        if (!exynos_fb) {
-               DRM_ERROR("failed to allocate exynos drm framebuffer.\n");
+               DRM_ERROR("failed to allocate exynos drm framebuffer\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       fb = &exynos_fb->fb;
-       ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
+       ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
        if (ret) {
-               DRM_ERROR("failed to initialize framebuffer.\n");
-               goto err_init;
+               DRM_ERROR("failed to initialize framebuffer\n");
+               return ERR_PTR(ret);
        }
 
-       DRM_LOG_KMS("create: fb id: %d\n", fb->base.id);
+       drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+       exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
 
-       size = mode_cmd->pitch * mode_cmd->height;
+       return &exynos_fb->fb;
+}
 
-       /*
-        * mode_cmd->handle could be NULL at booting time or
-        * with user request. if NULL, a new buffer or a gem object
-        * would be allocated.
-        */
-       if (!mode_cmd->handle) {
-               if (!file_priv) {
-                       struct exynos_drm_gem_buf *buffer;
-
-                       /*
-                        * in case that file_priv is NULL, it allocates
-                        * only buffer and this buffer would be used
-                        * for default framebuffer.
-                        */
-                       buffer = exynos_drm_buf_create(dev, size);
-                       if (IS_ERR(buffer)) {
-                               ret = PTR_ERR(buffer);
-                               goto err_buffer;
-                       }
-
-                       exynos_fb->buffer = buffer;
-
-                       DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
-                                       (unsigned long)buffer->dma_addr, size);
-
-                       goto out;
-               } else {
-                       exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
-                                                       &mode_cmd->handle,
-                                                       size);
-                       if (IS_ERR(exynos_gem_obj)) {
-                               ret = PTR_ERR(exynos_gem_obj);
-                               goto err_buffer;
-                       }
-               }
-       } else {
-               obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
-               if (!obj) {
-                       DRM_ERROR("failed to lookup gem object.\n");
-                       goto err_buffer;
-               }
+static struct drm_framebuffer *
+exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+                     struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct drm_gem_object *obj;
+       struct drm_framebuffer *fb;
+       struct exynos_drm_fb *exynos_fb;
+       int nr;
+       int i;
 
-               exynos_gem_obj = to_exynos_gem_obj(obj);
+       DRM_DEBUG_KMS("%s\n", __FILE__);
 
-               drm_gem_object_unreference_unlocked(obj);
+       obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+       if (!obj) {
+               DRM_ERROR("failed to lookup gem object\n");
+               return ERR_PTR(-ENOENT);
        }
 
-       /*
-        * if got a exynos_gem_obj from either a handle or
-        * a new creation then exynos_fb->exynos_gem_obj is NULL
-        * so that default framebuffer has no its own gem object,
-        * only its own buffer object.
-        */
-       exynos_fb->buffer = exynos_gem_obj->buffer;
-
-       DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
-                       (unsigned long)exynos_fb->buffer->dma_addr, size,
-                       (unsigned int)&exynos_gem_obj->base);
+       drm_gem_object_unreference_unlocked(obj);
 
-out:
-       exynos_fb->exynos_gem_obj = exynos_gem_obj;
+       fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
+       if (IS_ERR(fb))
+               return fb;
 
-       drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+       exynos_fb = to_exynos_fb(fb);
+       nr = exynos_drm_format_num_buffers(fb->pixel_format);
 
-       return fb;
-
-err_buffer:
-       drm_framebuffer_cleanup(fb);
-
-err_init:
-       kfree(exynos_fb);
+       for (i = 1; i < nr; i++) {
+               obj = drm_gem_object_lookup(dev, file_priv,
+                               mode_cmd->handles[i]);
+               if (!obj) {
+                       DRM_ERROR("failed to lookup gem object\n");
+                       exynos_drm_fb_destroy(fb);
+                       return ERR_PTR(-ENOENT);
+               }
 
-       return ERR_PTR(ret);
-}
+               drm_gem_object_unreference_unlocked(obj);
 
-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
-                                             struct drm_file *file_priv,
-                                             struct drm_mode_fb_cmd *mode_cmd)
-{
-       DRM_DEBUG_KMS("%s\n", __FILE__);
+               exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
+       }
 
-       return exynos_drm_fb_init(file_priv, dev, mode_cmd);
+       return fb;
 }
 
-struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+                                               int index)
 {
        struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
        struct exynos_drm_gem_buf *buffer;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       buffer = exynos_fb->buffer;
+       if (index >= MAX_FB_BUFFER)
+               return NULL;
+
+       buffer = exynos_fb->exynos_gem_obj[index]->buffer;
        if (!buffer)
                return NULL;
 
@@ -250,7 +192,7 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
 }
 
 static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
-       .fb_create = exynos_drm_fb_create,
+       .fb_create = exynos_user_fb_create,
        .output_poll_changed = exynos_drm_output_poll_changed,
 };
 
index eb35931..3ecb30d 100644 (file)
 #ifndef _EXYNOS_DRM_FB_H_
 #define _EXYNOS_DRM_FB_H
 
-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
-                                             struct drm_file *filp,
-                                             struct drm_mode_fb_cmd *mode_cmd);
+static inline int exynos_drm_format_num_buffers(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_NV12M:
+       case DRM_FORMAT_NV12MT:
+               return 2;
+       case DRM_FORMAT_YUV420M:
+               return 3;
+       default:
+               return 1;
+       }
+}
+
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+                           struct drm_mode_fb_cmd2 *mode_cmd,
+                           struct drm_gem_object *obj);
+
+/* get memory information of a drm framebuffer */
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+                                                int index);
 
 void exynos_drm_mode_config_init(struct drm_device *dev);
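
As a quick usage sketch (illustrative only, mirroring the lookup loop in exynos_user_fb_create() above), the count returned by exynos_drm_format_num_buffers() tells the fb_create path how many gem handles a multi-planar format carries:

    /* NV12M is a 2-plane format, so two gem handles must be looked up */
    int i, nr = exynos_drm_format_num_buffers(DRM_FORMAT_NV12M);
    struct drm_gem_object *obj[MAX_FB_BUFFER];

    for (i = 0; i < nr; i++) {
            obj[i] = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]);
            if (!obj[i])
                    break;  /* missing handle for plane i */
    }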
 
index 836f410..d7ae29d 100644 (file)
@@ -34,7 +34,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 
 #define MAX_CONNECTOR          4
 #define PREFERRED_BPP          32
@@ -43,8 +42,8 @@
                                drm_fb_helper)
 
 struct exynos_drm_fbdev {
-       struct drm_fb_helper    drm_fb_helper;
-       struct drm_framebuffer  *fb;
+       struct drm_fb_helper            drm_fb_helper;
+       struct exynos_drm_gem_obj       *exynos_gem_obj;
 };
 
 static int exynos_drm_fbdev_set_par(struct fb_info *info)
@@ -90,26 +89,24 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 {
        struct fb_info *fbi = helper->fbdev;
        struct drm_device *dev = helper->dev;
-       struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
        struct exynos_drm_gem_buf *buffer;
        unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
        unsigned long offset;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       exynos_fb->fb = fb;
-
-       drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
+       drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
 
-       buffer = exynos_drm_fb_get_buf(fb);
+       /* RGB formats use only one buffer */
+       buffer = exynos_drm_fb_buffer(fb, 0);
        if (!buffer) {
                DRM_LOG_KMS("buffer is null.\n");
                return -EFAULT;
        }
 
        offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
-       offset += fbi->var.yoffset * fb->pitch;
+       offset += fbi->var.yoffset * fb->pitches[0];
 
        dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
        fbi->screen_base = buffer->kvaddr + offset;
@@ -124,10 +121,12 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
                                    struct drm_fb_helper_surface_size *sizes)
 {
        struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+       struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_device *dev = helper->dev;
        struct fb_info *fbi;
-       struct drm_mode_fb_cmd mode_cmd = { 0 };
+       struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct platform_device *pdev = dev->platformdev;
+       unsigned long size;
        int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -138,8 +137,9 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 
        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
-       mode_cmd.bpp = sizes->surface_bpp;
-       mode_cmd.depth = sizes->surface_depth;
+       mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                         sizes->surface_depth);
 
        mutex_lock(&dev->struct_mutex);
 
@@ -150,14 +150,23 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
                goto out;
        }
 
-       exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
-       if (IS_ERR_OR_NULL(exynos_fbdev->fb)) {
+       size = mode_cmd.pitches[0] * mode_cmd.height;
+       exynos_gem_obj = exynos_drm_gem_create(dev, size);
+       if (IS_ERR(exynos_gem_obj)) {
+               ret = PTR_ERR(exynos_gem_obj);
+               goto out;
+       }
+
+       exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+       helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+                       &exynos_gem_obj->base);
+       if (IS_ERR_OR_NULL(helper->fb)) {
                DRM_ERROR("failed to create drm framebuffer.\n");
-               ret = PTR_ERR(exynos_fbdev->fb);
+               ret = PTR_ERR(helper->fb);
                goto out;
        }
 
-       helper->fb = exynos_fbdev->fb;
        helper->fbdev = fbi;
 
        fbi->par = helper;
@@ -171,8 +180,10 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
        }
 
        ret = exynos_drm_fbdev_update(helper, helper->fb);
-       if (ret < 0)
+       if (ret < 0) {
                fb_dealloc_cmap(&fbi->cmap);
+               goto out;
+       }
 
 /*
  * if failed, all resources allocated above would be released by
@@ -205,34 +216,42 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
 {
        struct drm_device *dev = helper->dev;
        struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
-       struct drm_framebuffer *fb = exynos_fbdev->fb;
-       struct drm_mode_fb_cmd mode_cmd = { 0 };
+       struct exynos_drm_gem_obj *exynos_gem_obj;
+       struct drm_framebuffer *fb = helper->fb;
+       struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+       unsigned long size;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       if (helper->fb != fb) {
-               DRM_ERROR("drm framebuffer is different\n");
-               return -EINVAL;
-       }
-
        if (exynos_drm_fbdev_is_samefb(fb, sizes))
                return 0;
 
        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
-       mode_cmd.bpp = sizes->surface_bpp;
-       mode_cmd.depth = sizes->surface_depth;
+       mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                         sizes->surface_depth);
+
+       if (exynos_fbdev->exynos_gem_obj)
+               exynos_drm_gem_destroy(exynos_fbdev->exynos_gem_obj);
 
        if (fb->funcs->destroy)
                fb->funcs->destroy(fb);
 
-       exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
-       if (IS_ERR(exynos_fbdev->fb)) {
-               DRM_ERROR("failed to allocate fb.\n");
-               return PTR_ERR(exynos_fbdev->fb);
+       size = mode_cmd.pitches[0] * mode_cmd.height;
+       exynos_gem_obj = exynos_drm_gem_create(dev, size);
+       if (IS_ERR(exynos_gem_obj))
+               return PTR_ERR(exynos_gem_obj);
+
+       exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+       helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+                       &exynos_gem_obj->base);
+       if (IS_ERR_OR_NULL(helper->fb)) {
+               DRM_ERROR("failed to create drm framebuffer.\n");
+               return PTR_ERR(helper->fb);
        }
 
-       helper->fb = exynos_fbdev->fb;
        return exynos_drm_fbdev_update(helper, helper->fb);
 }
 
@@ -366,6 +385,9 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
 
        fbdev = to_exynos_fbdev(private->fb_helper);
 
+       if (fbdev->exynos_gem_obj)
+               exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
+
        exynos_drm_fbdev_destroy(dev, private->fb_helper);
        kfree(fbdev);
        private->fb_helper = NULL;
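
For reference, a minimal sketch (illustrative values only) of what the drm_mode_fb_cmd2 setup above produces for the usual 32bpp / depth-24 console surface:

    struct drm_mode_fb_cmd2 mode_cmd = { 0 };

    mode_cmd.width        = 1024;
    mode_cmd.height       = 768;
    mode_cmd.pitches[0]   = 1024 * (32 >> 3);                  /* 4096 bytes per line */
    mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24); /* DRM_FORMAT_XRGB8888 */
    /* the size handed to exynos_drm_gem_create() is then pitches[0] * height */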
index db3b3d9..ca83139 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/exynos_drm.h>
 #include <plat/regs-fb-v4.h>
@@ -68,6 +69,7 @@ struct fimd_win_data {
        void __iomem            *vaddr;
        unsigned int            buf_offsize;
        unsigned int            line_size;      /* bytes */
+       bool                    enabled;
 };
 
 struct fimd_context {
@@ -84,6 +86,8 @@ struct fimd_context {
        unsigned long                   irq_flags;
        u32                             vidcon0;
        u32                             vidcon1;
+       bool                            suspended;
+       struct mutex                    lock;
 
        struct fb_videomode             *timing;
 };
@@ -119,7 +123,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
 {
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       /* TODO. */
+       /* TODO */
 
        return 0;
 }
@@ -132,12 +136,68 @@ static struct exynos_drm_display_ops fimd_display_ops = {
        .power_on = fimd_display_power_on,
 };
 
+static void fimd_dpms(struct device *subdrv_dev, int mode)
+{
+       struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
+
+       mutex_lock(&ctx->lock);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               /*
+                * enable the fimd hardware only if it is currently suspended.
+                *
+                * Note: fimd_dpms can be called at boot time, so clk_enable
+                * could otherwise end up being called twice.
+                */
+               if (ctx->suspended)
+                       pm_runtime_get_sync(subdrv_dev);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               pm_runtime_put_sync(subdrv_dev);
+               break;
+       default:
+               DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+               break;
+       }
+
+       mutex_unlock(&ctx->lock);
+}
+
+static void fimd_apply(struct device *subdrv_dev)
+{
+       struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+       struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+       struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
+       struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
+       struct fimd_win_data *win_data;
+       int i;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       for (i = 0; i < WINDOWS_NR; i++) {
+               win_data = &ctx->win_data[i];
+               if (win_data->enabled && (ovl_ops && ovl_ops->commit))
+                       ovl_ops->commit(subdrv_dev, i);
+       }
+
+       if (mgr_ops && mgr_ops->commit)
+               mgr_ops->commit(subdrv_dev);
+}
+
 static void fimd_commit(struct device *dev)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
        struct fb_videomode *timing = ctx->timing;
        u32 val;
 
+       if (ctx->suspended)
+               return;
+
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        /* setup polarity values from machine code. */
@@ -177,40 +237,6 @@ static void fimd_commit(struct device *dev)
        writel(val, ctx->regs + VIDCON0);
 }
 
-static void fimd_disable(struct device *dev)
-{
-       struct fimd_context *ctx = get_fimd_context(dev);
-       struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-       struct drm_device *drm_dev = subdrv->drm_dev;
-       struct exynos_drm_manager *manager = &subdrv->manager;
-       u32 val;
-
-       DRM_DEBUG_KMS("%s\n", __FILE__);
-
-       /* fimd dma off */
-       val = readl(ctx->regs + VIDCON0);
-       val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
-       writel(val, ctx->regs + VIDCON0);
-
-       /*
-        * if vblank is enabled status with dma off then
-        * it disables vsync interrupt.
-        */
-       if (drm_dev->vblank_enabled[manager->pipe] &&
-               atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
-               drm_vblank_put(drm_dev, manager->pipe);
-
-               /*
-                * if vblank_disable_allowed is 0 then disable
-                * vsync interrupt right now else the vsync interrupt
-                * would be disabled by drm timer once a current process
-                * gives up ownershop of vblank event.
-                */
-               if (!drm_dev->vblank_disable_allowed)
-                       drm_vblank_off(drm_dev, manager->pipe);
-       }
-}
-
 static int fimd_enable_vblank(struct device *dev)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
@@ -218,6 +244,9 @@ static int fimd_enable_vblank(struct device *dev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       if (ctx->suspended)
+               return -EPERM;
+
        if (!test_and_set_bit(0, &ctx->irq_flags)) {
                val = readl(ctx->regs + VIDINTCON0);
 
@@ -242,6 +271,9 @@ static void fimd_disable_vblank(struct device *dev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       if (ctx->suspended)
+               return;
+
        if (test_and_clear_bit(0, &ctx->irq_flags)) {
                val = readl(ctx->regs + VIDINTCON0);
 
@@ -253,8 +285,9 @@ static void fimd_disable_vblank(struct device *dev)
 }
 
 static struct exynos_drm_manager_ops fimd_manager_ops = {
+       .dpms = fimd_dpms,
+       .apply = fimd_apply,
        .commit = fimd_commit,
-       .disable = fimd_disable,
        .enable_vblank = fimd_enable_vblank,
        .disable_vblank = fimd_disable_vblank,
 };
@@ -264,6 +297,7 @@ static void fimd_win_mode_set(struct device *dev,
 {
        struct fimd_context *ctx = get_fimd_context(dev);
        struct fimd_win_data *win_data;
+       int win;
        unsigned long offset;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -273,12 +307,19 @@ static void fimd_win_mode_set(struct device *dev,
                return;
        }
 
+       win = overlay->zpos;
+       if (win == DEFAULT_ZPOS)
+               win = ctx->default_win;
+
+       if (win < 0 || win > WINDOWS_NR)
+               return;
+
        offset = overlay->fb_x * (overlay->bpp >> 3);
        offset += overlay->fb_y * overlay->pitch;
 
        DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
 
-       win_data = &ctx->win_data[ctx->default_win];
+       win_data = &ctx->win_data[win];
 
        win_data->offset_x = overlay->crtc_x;
        win_data->offset_y = overlay->crtc_y;
@@ -286,8 +327,8 @@ static void fimd_win_mode_set(struct device *dev,
        win_data->ovl_height = overlay->crtc_height;
        win_data->fb_width = overlay->fb_width;
        win_data->fb_height = overlay->fb_height;
-       win_data->dma_addr = overlay->dma_addr + offset;
-       win_data->vaddr = overlay->vaddr + offset;
+       win_data->dma_addr = overlay->dma_addr[0] + offset;
+       win_data->vaddr = overlay->vaddr[0] + offset;
        win_data->bpp = overlay->bpp;
        win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
                                (overlay->bpp >> 3);
@@ -381,15 +422,21 @@ static void fimd_win_set_colkey(struct device *dev, unsigned int win)
        writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
 }
 
-static void fimd_win_commit(struct device *dev)
+static void fimd_win_commit(struct device *dev, int zpos)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
        struct fimd_win_data *win_data;
-       int win = ctx->default_win;
+       int win = zpos;
        unsigned long val, alpha, size;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       if (ctx->suspended)
+               return;
+
+       if (win == DEFAULT_ZPOS)
+               win = ctx->default_win;
+
        if (win < 0 || win > WINDOWS_NR)
                return;
 
@@ -472,24 +519,37 @@ static void fimd_win_commit(struct device *dev)
        if (win != 0)
                fimd_win_set_colkey(dev, win);
 
+       /* wincon */
+       val = readl(ctx->regs + WINCON(win));
+       val |= WINCONx_ENWIN;
+       writel(val, ctx->regs + WINCON(win));
+
        /* Enable DMA channel and unprotect windows */
        val = readl(ctx->regs + SHADOWCON);
        val |= SHADOWCON_CHx_ENABLE(win);
        val &= ~SHADOWCON_WINx_PROTECT(win);
        writel(val, ctx->regs + SHADOWCON);
+
+       win_data->enabled = true;
 }
 
-static void fimd_win_disable(struct device *dev)
+static void fimd_win_disable(struct device *dev, int zpos)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
-       int win = ctx->default_win;
+       struct fimd_win_data *win_data;
+       int win = zpos;
        u32 val;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       if (win == DEFAULT_ZPOS)
+               win = ctx->default_win;
+
        if (win < 0 || win > WINDOWS_NR)
                return;
 
+       win_data = &ctx->win_data[win];
+
        /* protect windows */
        val = readl(ctx->regs + SHADOWCON);
        val |= SHADOWCON_WINx_PROTECT(win);
@@ -505,6 +565,8 @@ static void fimd_win_disable(struct device *dev)
        val &= ~SHADOWCON_CHx_ENABLE(win);
        val &= ~SHADOWCON_WINx_PROTECT(win);
        writel(val, ctx->regs + SHADOWCON);
+
+       win_data->enabled = false;
 }
 
 static struct exynos_drm_overlay_ops fimd_overlay_ops = {
@@ -540,9 +602,17 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
                wake_up_interruptible(&e->base.file_priv->event_wait);
        }
 
-       if (is_checked)
+       if (is_checked) {
                drm_vblank_put(drm_dev, crtc);
 
+               /*
+                * don't turn vblank off here if vblank_disable_allowed is 1,
+                * because the timer handler will turn it off later.
+                */
+               if (!drm_dev->vblank_disable_allowed)
+                       drm_vblank_off(drm_dev, crtc);
+       }
+
        spin_unlock_irqrestore(&drm_dev->event_lock, flags);
 }
 
@@ -560,19 +630,14 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
                /* VSYNC interrupt */
                writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
 
-       /*
-        * in case that vblank_disable_allowed is 1, it could induce
-        * the problem that manager->pipe could be -1 because with
-        * disable callback, vsync interrupt isn't disabled and at this moment,
-        * vsync interrupt could occur. the vsync interrupt would be disabled
-        * by timer handler later.
-        */
-       if (manager->pipe == -1)
-               return IRQ_HANDLED;
+       /* check whether the crtc is already detached from the encoder */
+       if (manager->pipe < 0)
+               goto out;
 
        drm_handle_vblank(drm_dev, manager->pipe);
        fimd_finish_pageflip(drm_dev, manager->pipe);
 
+out:
        return IRQ_HANDLED;
 }
 
@@ -590,6 +655,13 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
         */
        drm_dev->irq_enabled = 1;
 
+       /*
+        * with vblank_disable_allowed = 1, the vblank interrupt will be
+        * disabled by the drm timer once the current process gives up
+        * ownership of the vblank event (after drm_vblank_put() is called).
+        */
+       drm_dev->vblank_disable_allowed = 1;
+
        return 0;
 }
 
@@ -739,9 +811,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 
        ctx->irq = res->start;
 
-       for (win = 0; win < WINDOWS_NR; win++)
-               fimd_clear_win(ctx, win);
-
        ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx);
        if (ret < 0) {
                dev_err(dev, "irq request failed.\n");
@@ -769,7 +838,17 @@ static int __devinit fimd_probe(struct platform_device *pdev)
        subdrv->manager.display_ops = &fimd_display_ops;
        subdrv->manager.dev = dev;
 
+       mutex_init(&ctx->lock);
+
        platform_set_drvdata(pdev, ctx);
+
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+       pm_runtime_get_sync(dev);
+
+       for (win = 0; win < WINDOWS_NR; win++)
+               fimd_clear_win(ctx, win);
+
        exynos_drm_subdrv_register(subdrv);
 
        return 0;
@@ -797,14 +876,25 @@ err_clk_get:
 
 static int __devexit fimd_remove(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        struct fimd_context *ctx = platform_get_drvdata(pdev);
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        exynos_drm_subdrv_unregister(&ctx->subdrv);
 
+       if (ctx->suspended)
+               goto out;
+
        clk_disable(ctx->lcd_clk);
        clk_disable(ctx->bus_clk);
+
+       pm_runtime_set_suspended(dev);
+       pm_runtime_put_sync(dev);
+
+out:
+       pm_runtime_disable(dev);
+
        clk_put(ctx->lcd_clk);
        clk_put(ctx->bus_clk);
 
@@ -818,12 +908,102 @@ static int __devexit fimd_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int fimd_suspend(struct device *dev)
+{
+       int ret;
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       ret = pm_runtime_suspend(dev);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int fimd_resume(struct device *dev)
+{
+       int ret;
+
+       ret = pm_runtime_resume(dev);
+       if (ret < 0) {
+               DRM_ERROR("failed to resume runtime pm.\n");
+               return ret;
+       }
+
+       pm_runtime_disable(dev);
+
+       ret = pm_runtime_set_active(dev);
+       if (ret < 0) {
+               DRM_ERROR("failed to active runtime pm.\n");
+               pm_runtime_enable(dev);
+               pm_runtime_suspend(dev);
+               return ret;
+       }
+
+       pm_runtime_enable(dev);
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimd_runtime_suspend(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       clk_disable(ctx->lcd_clk);
+       clk_disable(ctx->bus_clk);
+
+       ctx->suspended = true;
+       return 0;
+}
+
+static int fimd_runtime_resume(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       ret = clk_enable(ctx->bus_clk);
+       if (ret < 0)
+               return ret;
+
+       ret = clk_enable(ctx->lcd_clk);
+       if (ret < 0) {
+               clk_disable(ctx->bus_clk);
+               return ret;
+       }
+
+       ctx->suspended = false;
+
+       /* if vblank was previously enabled, enable it again. */
+       if (test_and_clear_bit(0, &ctx->irq_flags))
+               fimd_enable_vblank(dev);
+
+       fimd_apply(dev);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops fimd_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
+       SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
+};
+
 static struct platform_driver fimd_driver = {
        .probe          = fimd_probe,
        .remove         = __devexit_p(fimd_remove),
        .driver         = {
                .name   = "exynos4-fb",
                .owner  = THIS_MODULE,
+               .pm     = &fimd_pm_ops,
        },
 };
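
To summarize the runtime-PM wiring added above, a sketch of the resulting call chain (not extra code in the patch):

    /* DPMS transitions now funnel into runtime PM, which gates the clocks */
    fimd_dpms(dev, DRM_MODE_DPMS_ON);
            /* -> pm_runtime_get_sync() -> fimd_runtime_resume()
             *    -> clk_enable(bus_clk/lcd_clk), suspended = false, fimd_apply() */
    fimd_dpms(dev, DRM_MODE_DPMS_OFF);
            /* -> pm_runtime_put_sync() -> fimd_runtime_suspend()
             *    -> clk_disable(lcd_clk/bus_clk), suspended = true */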
 
index aba0fe4..025abb3 100644 (file)
@@ -55,17 +55,54 @@ static unsigned int convert_to_vm_err_msg(int msg)
        return out_msg;
 }
 
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
+                                       struct drm_file *file_priv,
+                                       unsigned int *handle)
 {
+       int ret;
+
+       /*
+        * allocate an id in the idr table where the obj is registered;
+        * the handle carries the id that userspace sees.
+        */
+       ret = drm_gem_handle_create(file_priv, obj, handle);
+       if (ret)
+               return ret;
+
+       DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+
+       /* drop reference from allocate - handle holds it now. */
+       drm_gem_object_unreference_unlocked(obj);
+
+       return 0;
+}
+
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+       struct drm_gem_object *obj;
+
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+       if (!exynos_gem_obj)
+               return;
+
+       obj = &exynos_gem_obj->base;
+
+       DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+
+       exynos_drm_buf_destroy(obj->dev, exynos_gem_obj->buffer);
+
+       if (obj->map_list.map)
+               drm_gem_free_mmap_offset(obj);
+
+       /* release file pointer to gem object. */
+       drm_gem_object_release(obj);
+
+       kfree(exynos_gem_obj);
 }
 
-static struct exynos_drm_gem_obj
-               *exynos_drm_gem_init(struct drm_device *drm_dev,
-                       struct drm_file *file_priv, unsigned int *handle,
-                       unsigned int size)
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+                                                     unsigned long size)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
@@ -73,75 +110,41 @@ static struct exynos_drm_gem_obj
 
        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj) {
-               DRM_ERROR("failed to allocate exynos gem object.\n");
-               return ERR_PTR(-ENOMEM);
+               DRM_ERROR("failed to allocate exynos gem object\n");
+               return NULL;
        }
 
        obj = &exynos_gem_obj->base;
 
-       ret = drm_gem_object_init(drm_dev, obj, size);
+       ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
-               DRM_ERROR("failed to initialize gem object.\n");
-               ret = -EINVAL;
-               goto err_object_init;
+               DRM_ERROR("failed to initialize gem object\n");
+               kfree(exynos_gem_obj);
+               return NULL;
        }
 
        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
 
-       ret = drm_gem_create_mmap_offset(obj);
-       if (ret < 0) {
-               DRM_ERROR("failed to allocate mmap offset.\n");
-               goto err_create_mmap_offset;
-       }
-
-       /*
-        * allocate a id of idr table where the obj is registered
-        * and handle has the id what user can see.
-        */
-       ret = drm_gem_handle_create(file_priv, obj, handle);
-       if (ret)
-               goto err_handle_create;
-
-       DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
-
-       /* drop reference from allocate - handle holds it now. */
-       drm_gem_object_unreference_unlocked(obj);
-
        return exynos_gem_obj;
-
-err_handle_create:
-       drm_gem_free_mmap_offset(obj);
-
-err_create_mmap_offset:
-       drm_gem_object_release(obj);
-
-err_object_init:
-       kfree(exynos_gem_obj);
-
-       return ERR_PTR(ret);
 }
 
 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
-                               struct drm_file *file_priv,
-                               unsigned int *handle, unsigned long size)
+                                                unsigned long size)
 {
-
-       struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
        struct exynos_drm_gem_buf *buffer;
+       struct exynos_drm_gem_obj *exynos_gem_obj;
 
        size = roundup(size, PAGE_SIZE);
-
        DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
 
        buffer = exynos_drm_buf_create(dev, size);
-       if (IS_ERR(buffer)) {
-               return ERR_CAST(buffer);
-       }
+       if (!buffer)
+               return ERR_PTR(-ENOMEM);
 
-       exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
-       if (IS_ERR(exynos_gem_obj)) {
+       exynos_gem_obj = exynos_drm_gem_init(dev, size);
+       if (!exynos_gem_obj) {
                exynos_drm_buf_destroy(dev, buffer);
-               return exynos_gem_obj;
+               return ERR_PTR(-ENOMEM);
        }
 
        exynos_gem_obj->buffer = buffer;
@@ -150,23 +153,30 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
 }
 
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
-                                       struct drm_file *file_priv)
+                               struct drm_file *file_priv)
 {
        struct drm_exynos_gem_create *args = data;
-       struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+       struct exynos_drm_gem_obj *exynos_gem_obj;
+       int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
-                                               &args->handle, args->size);
+       exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);
 
+       ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+                       &args->handle);
+       if (ret) {
+               exynos_drm_gem_destroy(exynos_gem_obj);
+               return ret;
+       }
+
        return 0;
 }
 
 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv)
+                                   struct drm_file *file_priv)
 {
        struct drm_exynos_gem_map_off *args = data;
 
@@ -185,7 +195,7 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 }
 
 static int exynos_drm_gem_mmap_buffer(struct file *filp,
-               struct vm_area_struct *vma)
+                                     struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
@@ -196,6 +206,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 
        vma->vm_flags |= (VM_IO | VM_RESERVED);
 
+       /* in the case of direct mapping, always use a non-cacheable attribute */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_file = filp;
 
@@ -232,7 +243,7 @@ static const struct file_operations exynos_drm_gem_fops = {
 };
 
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv)
+                             struct drm_file *file_priv)
 {
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
@@ -278,32 +289,19 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
        return 0;
 }
 
-void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
+void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
-       struct exynos_drm_gem_obj *exynos_gem_obj;
-
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       DRM_DEBUG_KMS("handle count = %d\n",
-                       atomic_read(&gem_obj->handle_count));
-
-       if (gem_obj->map_list.map)
-               drm_gem_free_mmap_offset(gem_obj);
-
-       /* release file pointer to gem object. */
-       drm_gem_object_release(gem_obj);
-
-       exynos_gem_obj = to_exynos_gem_obj(gem_obj);
-
-       exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
-
-       kfree(exynos_gem_obj);
+       exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
 }
 
 int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
-               struct drm_device *dev, struct drm_mode_create_dumb *args)
+                              struct drm_device *dev,
+                              struct drm_mode_create_dumb *args)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
+       int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -316,19 +314,27 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
        args->pitch = args->width * args->bpp >> 3;
        args->size = args->pitch * args->height;
 
-       exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
-                                                       args->size);
+       exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);
 
+       ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+                       &args->handle);
+       if (ret) {
+               exynos_drm_gem_destroy(exynos_gem_obj);
+               return ret;
+       }
+
        return 0;
 }
 
 int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
-               struct drm_device *dev, uint32_t handle, uint64_t *offset)
+                                  struct drm_device *dev, uint32_t handle,
+                                  uint64_t *offset)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
+       int ret = 0;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -343,19 +349,46 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto unlock;
        }
 
        exynos_gem_obj = to_exynos_gem_obj(obj);
 
-       *offset = get_gem_mmap_offset(&exynos_gem_obj->base);
-
-       drm_gem_object_unreference(obj);
+       if (!exynos_gem_obj->base.map_list.map) {
+               ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+               if (ret)
+                       goto out;
+       }
 
+       *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
+out:
+       drm_gem_object_unreference(obj);
+unlock:
        mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+                               struct drm_device *dev,
+                               unsigned int handle)
+{
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       /*
+        * obj->refcount and obj->handle_count are decremented, and
+        * if both reach 0 then exynos_drm_gem_free_object()
+        * is called via callback to release the resources.
+        */
+       ret = drm_gem_handle_delete(file_priv, handle);
+       if (ret < 0) {
+               DRM_ERROR("failed to delete drm_gem_handle.\n");
+               return ret;
+       }
 
        return 0;
 }
@@ -403,28 +436,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        return ret;
 }
 
-
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
-               struct drm_device *dev, unsigned int handle)
-{
-       int ret;
-
-       DRM_DEBUG_KMS("%s\n", __FILE__);
-
-       /*
-        * obj->refcount and obj->handle_count are decreased and
-        * if both them are 0 then exynos_drm_gem_free_object()
-        * would be called by callback to release resources.
-        */
-       ret = drm_gem_handle_delete(file_priv, handle);
-       if (ret < 0) {
-               DRM_ERROR("failed to delete drm_gem_handle.\n");
-               return ret;
-       }
-
-       return 0;
-}
-
 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
 MODULE_DESCRIPTION("Samsung SoC DRM GEM Module");
 MODULE_LICENSE("GPL");
index ef87973..67cdc91 100644 (file)
@@ -60,14 +60,16 @@ struct exynos_drm_gem_buf {
  *     user can access the buffer through kms_bo.handle.
  */
 struct exynos_drm_gem_obj {
-       struct drm_gem_object base;
-       struct exynos_drm_gem_buf *buffer;
+       struct drm_gem_object           base;
+       struct exynos_drm_gem_buf       *buffer;
 };
 
-/* create a new buffer and get a new gem handle. */
+/* destroy a buffer with gem object */
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
+
+/* create a new buffer with gem object */
 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
-               struct drm_file *file_priv,
-               unsigned int *handle, unsigned long size);
+                                                unsigned long size);
 
 /*
  * request gem object creation and buffer allocation as the size
@@ -75,15 +77,18 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
  * height and bpp.
  */
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv);
+                               struct drm_file *file_priv);
 
 /* get buffer offset to map to user space. */
 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv);
+                                   struct drm_file *file_priv);
 
-/* unmap a buffer from user space. */
-int exynos_drm_gem_munmap_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv);
+/*
+ * mmap the physically contiguous memory that a gem object contains
+ * to user space.
+ */
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
 
 /* initialize gem object. */
 int exynos_drm_gem_init_object(struct drm_gem_object *obj);
@@ -93,24 +98,13 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
 
 /* create memory region for drm framebuffer. */
 int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
-               struct drm_device *dev, struct drm_mode_create_dumb *args);
+                              struct drm_device *dev,
+                              struct drm_mode_create_dumb *args);
 
 /* map memory region for drm framebuffer to user space. */
 int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
-               struct drm_device *dev, uint32_t handle, uint64_t *offset);
-
-/* page fault handler and mmap fault address(virtual) to physical memory. */
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-
-/*
- * mmap the physically continuous memory that a gem object contains
- * to user space.
- */
-int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv);
-
-/* set vm_flags and we can change the vm attribute to other one at here. */
-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+                                  struct drm_device *dev, uint32_t handle,
+                                  uint64_t *offset);
 
 /*
  * destroy memory region allocated.
@@ -118,6 +112,13 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
  *     would be released by drm_gem_handle_delete().
  */
 int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
-               struct drm_device *dev, unsigned int handle);
+                               struct drm_device *dev,
+                               unsigned int handle);
+
+/* page fault handler: map the faulting (virtual) address to physical memory. */
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+/* set vm_flags; the vm attributes can be adjusted here. */
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
 #endif
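
For context, a minimal userspace sketch (illustrative; fd is an open DRM device node, and <sys/ioctl.h>, <sys/mman.h> plus the drm uapi headers are assumed) of the dumb-buffer path these hooks back:

    struct drm_mode_create_dumb creq = { .width = 1024, .height = 768, .bpp = 32 };
    struct drm_mode_map_dumb mreq = { 0 };
    struct drm_mode_destroy_dumb dreq = { 0 };

    ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);    /* exynos_drm_gem_dumb_create() */

    mreq.handle = creq.handle;
    ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);       /* exynos_drm_gem_dumb_map_offset() */

    void *fbmem = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, mreq.offset); /* exynos_drm_gem_mmap()/_fault() */

    /* ... draw into fbmem, then drop the buffer ... */
    dreq.handle = creq.handle;
    ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq);   /* exynos_drm_gem_dumb_destroy() */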
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
new file mode 100644 (file)
index 0000000..ed8a319
--- /dev/null
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Inki Dae <inki.dae@samsung.com>
+ *     Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+
+#define to_context(dev)                platform_get_drvdata(to_platform_device(dev))
+#define to_subdrv(dev)         to_context(dev)
+#define get_ctx_from_subdrv(subdrv)    container_of(subdrv,\
+                                       struct drm_hdmi_context, subdrv);
+
+/* these callback pointers should be set by the specific drivers. */
+static struct exynos_hdmi_display_ops *hdmi_display_ops;
+static struct exynos_hdmi_manager_ops *hdmi_manager_ops;
+static struct exynos_hdmi_overlay_ops *hdmi_overlay_ops;
+
+struct drm_hdmi_context {
+       struct exynos_drm_subdrv        subdrv;
+       struct exynos_drm_hdmi_context  *hdmi_ctx;
+       struct exynos_drm_hdmi_context  *mixer_ctx;
+       struct work_struct              work;
+};
+
+void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
+                                       *display_ops)
+{
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (display_ops)
+               hdmi_display_ops = display_ops;
+}
+EXPORT_SYMBOL(exynos_drm_display_ops_register);
+
+void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
+                                       *manager_ops)
+{
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (manager_ops)
+               hdmi_manager_ops = manager_ops;
+}
+EXPORT_SYMBOL(exynos_drm_manager_ops_register);
+
+void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
+                                       *overlay_ops)
+{
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (overlay_ops)
+               hdmi_overlay_ops = overlay_ops;
+}
+EXPORT_SYMBOL(exynos_drm_overlay_ops_register);
+
+static bool drm_hdmi_is_connected(struct device *dev)
+{
+       struct drm_hdmi_context *ctx = to_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_display_ops && hdmi_display_ops->is_connected)
+               return hdmi_display_ops->is_connected(ctx->hdmi_ctx->ctx);
+
+       return false;
+}
+
+static int drm_hdmi_get_edid(struct device *dev,
+               struct drm_connector *connector, u8 *edid, int len)
+{
+       struct drm_hdmi_context *ctx = to_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_display_ops && hdmi_display_ops->get_edid)
+               return hdmi_display_ops->get_edid(ctx->hdmi_ctx->ctx,
+                               connector, edid, len);
+
+       return 0;
+}
+
+static int drm_hdmi_check_timing(struct device *dev, void *timing)
+{
+       struct drm_hdmi_context *ctx = to_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_display_ops && hdmi_display_ops->check_timing)
+               return hdmi_display_ops->check_timing(ctx->hdmi_ctx->ctx,
+                               timing);
+
+       return 0;
+}
+
+static int drm_hdmi_power_on(struct device *dev, int mode)
+{
+       struct drm_hdmi_context *ctx = to_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_display_ops && hdmi_display_ops->power_on)
+               return hdmi_display_ops->power_on(ctx->hdmi_ctx->ctx, mode);
+
+       return 0;
+}
+
+static struct exynos_drm_display_ops drm_hdmi_display_ops = {
+       .type = EXYNOS_DISPLAY_TYPE_HDMI,
+       .is_connected = drm_hdmi_is_connected,
+       .get_edid = drm_hdmi_get_edid,
+       .check_timing = drm_hdmi_check_timing,
+       .power_on = drm_hdmi_power_on,
+};
+
+static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+       struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+       struct exynos_drm_manager *manager = &subdrv->manager;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_overlay_ops && hdmi_overlay_ops->enable_vblank)
+               return hdmi_overlay_ops->enable_vblank(ctx->mixer_ctx->ctx,
+                                                       manager->pipe);
+
+       return 0;
+}
+
+static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_overlay_ops && hdmi_overlay_ops->disable_vblank)
+               return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx);
+}
+
+static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_manager_ops && hdmi_manager_ops->mode_set)
+               hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
+}
+
+static void drm_hdmi_commit(struct device *subdrv_dev)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_manager_ops && hdmi_manager_ops->commit)
+               hdmi_manager_ops->commit(ctx->hdmi_ctx->ctx);
+}
+
+static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               if (hdmi_manager_ops && hdmi_manager_ops->disable)
+                       hdmi_manager_ops->disable(ctx->hdmi_ctx->ctx);
+               break;
+       default:
+               DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+               break;
+       }
+}
+
+static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
+       .dpms = drm_hdmi_dpms,
+       .enable_vblank = drm_hdmi_enable_vblank,
+       .disable_vblank = drm_hdmi_disable_vblank,
+       .mode_set = drm_hdmi_mode_set,
+       .commit = drm_hdmi_commit,
+};
+
+static void drm_mixer_mode_set(struct device *subdrv_dev,
+               struct exynos_drm_overlay *overlay)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_overlay_ops && hdmi_overlay_ops->win_mode_set)
+               hdmi_overlay_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
+}
+
+static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_overlay_ops && hdmi_overlay_ops->win_commit)
+               hdmi_overlay_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
+}
+
+static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (hdmi_overlay_ops && hdmi_overlay_ops->win_disable)
+               hdmi_overlay_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
+}
+
+static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
+       .mode_set = drm_mixer_mode_set,
+       .commit = drm_mixer_commit,
+       .disable = drm_mixer_disable,
+};
+
+
+static int hdmi_subdrv_probe(struct drm_device *drm_dev,
+               struct device *dev)
+{
+       struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+       struct drm_hdmi_context *ctx;
+       struct platform_device *pdev = to_platform_device(dev);
+       struct exynos_drm_common_hdmi_pd *pd;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       pd = pdev->dev.platform_data;
+
+       if (!pd) {
+               DRM_DEBUG_KMS("platform data is null.\n");
+               return -EFAULT;
+       }
+
+       if (!pd->hdmi_dev) {
+               DRM_DEBUG_KMS("hdmi device is null.\n");
+               return -EFAULT;
+       }
+
+       if (!pd->mixer_dev) {
+               DRM_DEBUG_KMS("mixer device is null.\n");
+               return -EFAULT;
+       }
+
+       ret = platform_driver_register(&hdmi_driver);
+       if (ret) {
+               DRM_DEBUG_KMS("failed to register hdmi driver.\n");
+               return ret;
+       }
+
+       ret = platform_driver_register(&mixer_driver);
+       if (ret) {
+               DRM_DEBUG_KMS("failed to register mixer driver.\n");
+               goto err_hdmidrv;
+       }
+
+       ctx = get_ctx_from_subdrv(subdrv);
+
+       ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *)
+                               to_context(pd->hdmi_dev);
+       if (!ctx->hdmi_ctx) {
+               DRM_DEBUG_KMS("hdmi context is null.\n");
+               ret = -EFAULT;
+               goto err_mixerdrv;
+       }
+
+       ctx->hdmi_ctx->drm_dev = drm_dev;
+
+       ctx->mixer_ctx = (struct exynos_drm_hdmi_context *)
+                               to_context(pd->mixer_dev);
+       if (!ctx->mixer_ctx) {
+               DRM_DEBUG_KMS("mixer context is null.\n");
+               ret = -EFAULT;
+               goto err_mixerdrv;
+       }
+
+       ctx->mixer_ctx->drm_dev = drm_dev;
+
+       return 0;
+
+err_mixerdrv:
+       platform_driver_unregister(&mixer_driver);
+err_hdmidrv:
+       platform_driver_unregister(&hdmi_driver);
+       return ret;
+}
+
+static void hdmi_subdrv_remove(struct drm_device *drm_dev)
+{
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       platform_driver_unregister(&hdmi_driver);
+       platform_driver_unregister(&mixer_driver);
+}
+
+static void exynos_drm_hdmi_late_probe(struct work_struct *work)
+{
+       struct drm_hdmi_context *ctx = container_of(work,
+                               struct drm_hdmi_context, work);
+
+       /*
+        * exynos_drm_subdrv_register() ends up calling subdrv->probe(), so it
+        * must not be called from this driver's own probe path; hence the
+        * deferred work.
+        *
+        * Note: subdrv->probe() in turn calls platform_driver_register() to
+        * probe the hdmi and mixer drivers.
+        */
+       exynos_drm_subdrv_register(&ctx->subdrv);
+}
+
+static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct exynos_drm_subdrv *subdrv;
+       struct drm_hdmi_context *ctx;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx) {
+               DRM_LOG_KMS("failed to alloc common hdmi context.\n");
+               return -ENOMEM;
+       }
+
+       subdrv = &ctx->subdrv;
+
+       subdrv->probe = hdmi_subdrv_probe;
+       subdrv->remove = hdmi_subdrv_remove;
+       subdrv->manager.pipe = -1;
+       subdrv->manager.ops = &drm_hdmi_manager_ops;
+       subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops;
+       subdrv->manager.display_ops = &drm_hdmi_display_ops;
+       subdrv->manager.dev = dev;
+
+       platform_set_drvdata(pdev, subdrv);
+
+       INIT_WORK(&ctx->work, exynos_drm_hdmi_late_probe);
+
+       schedule_work(&ctx->work);
+
+       return 0;
+}
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       return 0;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+       .runtime_suspend = hdmi_runtime_suspend,
+       .runtime_resume  = hdmi_runtime_resume,
+};
+
+static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
+{
+       struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       exynos_drm_subdrv_unregister(&ctx->subdrv);
+       kfree(ctx);
+
+       return 0;
+}
+
+static struct platform_driver exynos_drm_common_hdmi_driver = {
+       .probe          = exynos_drm_hdmi_probe,
+       .remove         = __devexit_p(exynos_drm_hdmi_remove),
+       .driver         = {
+               .name   = "exynos-drm-hdmi",
+               .owner  = THIS_MODULE,
+               .pm = &hdmi_pm_ops,
+       },
+};
+
+static int __init exynos_drm_hdmi_init(void)
+{
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
+       if (ret) {
+               DRM_DEBUG_KMS("failed to register hdmi common driver.\n");
+               return ret;
+       }
+
+       return ret;
+}
+
+static void __exit exynos_drm_hdmi_exit(void)
+{
+       platform_driver_unregister(&exynos_drm_common_hdmi_driver);
+}
+
+module_init(exynos_drm_hdmi_init);
+module_exit(exynos_drm_hdmi_exit);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM HDMI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
new file mode 100644 (file)
index 0000000..3c29f79
--- /dev/null
@@ -0,0 +1,73 @@
+/* exynos_drm_hdmi.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_HDMI_H_
+#define _EXYNOS_DRM_HDMI_H_
+
+/*
+ * exynos hdmi common context structure.
+ *
+ * @drm_dev: pointer to drm_device.
+ * @ctx: pointer to the context of the specific device driver;
+ *     either a hdmi_context or a mixer_context.
+ */
+struct exynos_drm_hdmi_context {
+       struct drm_device       *drm_dev;
+       void                    *ctx;
+};
+
+struct exynos_hdmi_display_ops {
+       bool (*is_connected)(void *ctx);
+       int (*get_edid)(void *ctx, struct drm_connector *connector,
+                       u8 *edid, int len);
+       int (*check_timing)(void *ctx, void *timing);
+       int (*power_on)(void *ctx, int mode);
+};
+
+struct exynos_hdmi_manager_ops {
+       void (*mode_set)(void *ctx, void *mode);
+       void (*commit)(void *ctx);
+       void (*disable)(void *ctx);
+};
+
+struct exynos_hdmi_overlay_ops {
+       int (*enable_vblank)(void *ctx, int pipe);
+       void (*disable_vblank)(void *ctx);
+       void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
+       void (*win_commit)(void *ctx, int zpos);
+       void (*win_disable)(void *ctx, int zpos);
+};
+
+extern struct platform_driver hdmi_driver;
+extern struct platform_driver mixer_driver;
+
+void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
+                                       *display_ops);
+void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
+                                       *manager_ops);
+void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
+                                       *overlay_ops);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
new file mode 100644 (file)
index 0000000..bdcf770
--- /dev/null
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include "exynos_drm.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+
+struct exynos_plane {
+       struct drm_plane                base;
+       struct exynos_drm_overlay       overlay;
+       bool                            enabled;
+};
+
+static int
+exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+                    struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                    unsigned int crtc_w, unsigned int crtc_h,
+                    uint32_t src_x, uint32_t src_y,
+                    uint32_t src_w, uint32_t src_h)
+{
+       struct exynos_plane *exynos_plane =
+               container_of(plane, struct exynos_plane, base);
+       struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+       struct exynos_drm_crtc_pos pos;
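+       /* source coordinates from the DRM core are in 16.16 fixed point */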
+       unsigned int x = src_x >> 16;
+       unsigned int y = src_y >> 16;
+       int ret;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
+       pos.crtc_x = crtc_x;
+       pos.crtc_y = crtc_y;
+       pos.crtc_w = crtc_w;
+       pos.crtc_h = crtc_h;
+
+       pos.fb_x = x;
+       pos.fb_y = y;
+
+       /* TODO: scale feature */
+       ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
+       if (ret < 0)
+               return ret;
+
+       exynos_drm_fn_encoder(crtc, overlay,
+                       exynos_drm_encoder_crtc_mode_set);
+       exynos_drm_fn_encoder(crtc, &overlay->zpos,
+                       exynos_drm_encoder_crtc_plane_commit);
+
+       exynos_plane->enabled = true;
+
+       return 0;
+}
+
+static int exynos_disable_plane(struct drm_plane *plane)
+{
+       struct exynos_plane *exynos_plane =
+               container_of(plane, struct exynos_plane, base);
+       struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (!exynos_plane->enabled)
+               return 0;
+
+       exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+                       exynos_drm_encoder_crtc_disable);
+
+       exynos_plane->enabled = false;
+       exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+
+       return 0;
+}
+
+static void exynos_plane_destroy(struct drm_plane *plane)
+{
+       struct exynos_plane *exynos_plane =
+               container_of(plane, struct exynos_plane, base);
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       exynos_disable_plane(plane);
+       drm_plane_cleanup(plane);
+       kfree(exynos_plane);
+}
+
+static struct drm_plane_funcs exynos_plane_funcs = {
+       .update_plane   = exynos_update_plane,
+       .disable_plane  = exynos_disable_plane,
+       .destroy        = exynos_plane_destroy,
+};
+
+int exynos_plane_init(struct drm_device *dev, unsigned int nr)
+{
+       struct exynos_plane *exynos_plane;
+       uint32_t possible_crtcs;
+
+       exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
+       if (!exynos_plane)
+               return -ENOMEM;
+
+       /* all CRTCs are available */
+       possible_crtcs = (1 << MAX_CRTC) - 1;
+
+       exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+
+       /* TODO: format */
+       return drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
+                             &exynos_plane_funcs, NULL, 0, false);
+}
+
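+/*
+ * Driver-specific ioctl that lets userspace choose the overlay zpos of a
+ * plane. DEFAULT_ZPOS is accepted as a special value that bypasses the
+ * range check, presumably leaving the placement up to the driver.
+ */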
+int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct drm_exynos_plane_set_zpos *zpos_req = data;
+       struct drm_mode_object *obj;
+       struct drm_plane *plane;
+       struct exynos_plane *exynos_plane;
+       int ret = 0;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) {
+               if (zpos_req->zpos != DEFAULT_ZPOS) {
+                       DRM_ERROR("zpos not within limits\n");
+                       return -EINVAL;
+               }
+       }
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, zpos_req->plane_id,
+                       DRM_MODE_OBJECT_PLANE);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown plane ID %d\n",
+                             zpos_req->plane_id);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       plane = obj_to_plane(obj);
+       exynos_plane = container_of(plane, struct exynos_plane, base);
+
+       exynos_plane->overlay.zpos = zpos_req->zpos;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
new file mode 100644 (file)
index 0000000..16b71f8
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+int exynos_plane_init(struct drm_device *dev, unsigned int nr);
+int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
new file mode 100644 (file)
index 0000000..f48f7ce
--- /dev/null
@@ -0,0 +1,1176 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Seung-Woo Kim <sw0312.kim@samsung.com>
+ *     Inki Dae <inki.dae@samsung.com>
+ *     Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/hdmi_drv.c
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc_helper.h"
+
+#include "regs-hdmi.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+
+#include "exynos_hdmi.h"
+
+#define HDMI_OVERLAY_NUMBER    3
+#define get_hdmi_context(dev)  platform_get_drvdata(to_platform_device(dev))
+
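+/*
+ * HDMIPHY configuration blocks: one 32-byte I2C register sequence per pixel
+ * clock (27, 27.027, 74.175, 74.25 and 148.5 MHz). hdmiphy_conf_apply()
+ * sends the block selected for the current mode to the PHY over I2C.
+ */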
+static const u8 hdmiphy_conf27[32] = {
+       0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+       0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+       0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+       0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf27_027[32] = {
+       0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+       0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+       0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+       0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_175[32] = {
+       0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+       0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+       0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+       0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_25[32] = {
+       0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
+       0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
+       0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
+       0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf148_5[32] = {
+       0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+       0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
+       0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+       0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+};
+
+struct hdmi_tg_regs {
+       u8 cmd;
+       u8 h_fsz_l;
+       u8 h_fsz_h;
+       u8 hact_st_l;
+       u8 hact_st_h;
+       u8 hact_sz_l;
+       u8 hact_sz_h;
+       u8 v_fsz_l;
+       u8 v_fsz_h;
+       u8 vsync_l;
+       u8 vsync_h;
+       u8 vsync2_l;
+       u8 vsync2_h;
+       u8 vact_st_l;
+       u8 vact_st_h;
+       u8 vact_sz_l;
+       u8 vact_sz_h;
+       u8 field_chg_l;
+       u8 field_chg_h;
+       u8 vact_st2_l;
+       u8 vact_st2_h;
+       u8 vsync_top_hdmi_l;
+       u8 vsync_top_hdmi_h;
+       u8 vsync_bot_hdmi_l;
+       u8 vsync_bot_hdmi_h;
+       u8 field_top_hdmi_l;
+       u8 field_top_hdmi_h;
+       u8 field_bot_hdmi_l;
+       u8 field_bot_hdmi_h;
+};
+
+struct hdmi_core_regs {
+       u8 h_blank[2];
+       u8 v_blank[3];
+       u8 h_v_line[3];
+       u8 vsync_pol[1];
+       u8 int_pro_mode[1];
+       u8 v_blank_f[3];
+       u8 h_sync_gen[3];
+       u8 v_sync_gen1[3];
+       u8 v_sync_gen2[3];
+       u8 v_sync_gen3[3];
+};
+
+struct hdmi_preset_conf {
+       struct hdmi_core_regs core;
+       struct hdmi_tg_regs tg;
+};
+
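+/*
+ * Per-mode register presets: raw low/high byte values for the HDMI core
+ * sync registers and the timing generator, written out one byte at a time
+ * by hdmi_timing_apply().
+ */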
+static const struct hdmi_preset_conf hdmi_conf_480p = {
+       .core = {
+               .h_blank = {0x8a, 0x00},
+               .v_blank = {0x0d, 0x6a, 0x01},
+               .h_v_line = {0x0d, 0xa2, 0x35},
+               .vsync_pol = {0x01},
+               .int_pro_mode = {0x00},
+               .v_blank_f = {0x00, 0x00, 0x00},
+               .h_sync_gen = {0x0e, 0x30, 0x11},
+               .v_sync_gen1 = {0x0f, 0x90, 0x00},
+               /* other registers: don't care */
+       },
+       .tg = {
+               0x00, /* cmd */
+               0x5a, 0x03, /* h_fsz */
+               0x8a, 0x00, 0xd0, 0x02, /* hact */
+               0x0d, 0x02, /* v_fsz */
+               0x01, 0x00, 0x33, 0x02, /* vsync */
+               0x2d, 0x00, 0xe0, 0x01, /* vact */
+               0x33, 0x02, /* field_chg */
+               0x49, 0x02, /* vact_st2 */
+               0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_720p60 = {
+       .core = {
+               .h_blank = {0x72, 0x01},
+               .v_blank = {0xee, 0xf2, 0x00},
+               .h_v_line = {0xee, 0x22, 0x67},
+               .vsync_pol = {0x00},
+               .int_pro_mode = {0x00},
+               .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+               .h_sync_gen = {0x6c, 0x50, 0x02},
+               .v_sync_gen1 = {0x0a, 0x50, 0x00},
+               .v_sync_gen2 = {0x01, 0x10, 0x00},
+               .v_sync_gen3 = {0x01, 0x10, 0x00},
+               /* other registers: don't care */
+       },
+       .tg = {
+               0x00, /* cmd */
+               0x72, 0x06, /* h_fsz */
+               0x71, 0x01, 0x01, 0x05, /* hact */
+               0xee, 0x02, /* v_fsz */
+               0x01, 0x00, 0x33, 0x02, /* vsync */
+               0x1e, 0x00, 0xd0, 0x02, /* vact */
+               0x33, 0x02, /* field_chg */
+               0x49, 0x02, /* vact_st2 */
+               0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
+       .core = {
+               .h_blank = {0xd0, 0x02},
+               .v_blank = {0x32, 0xB2, 0x00},
+               .h_v_line = {0x65, 0x04, 0xa5},
+               .vsync_pol = {0x00},
+               .int_pro_mode = {0x01},
+               .v_blank_f = {0x49, 0x2A, 0x23},
+               .h_sync_gen = {0x0E, 0xEA, 0x08},
+               .v_sync_gen1 = {0x07, 0x20, 0x00},
+               .v_sync_gen2 = {0x39, 0x42, 0x23},
+               .v_sync_gen3 = {0x38, 0x87, 0x73},
+               /* other registers: don't care */
+       },
+       .tg = {
+               0x00, /* cmd */
+               0x50, 0x0A, /* h_fsz */
+               0xCF, 0x02, 0x81, 0x07, /* hact */
+               0x65, 0x04, /* v_fsz */
+               0x01, 0x00, 0x33, 0x02, /* vsync */
+               0x16, 0x00, 0x1c, 0x02, /* vact */
+               0x33, 0x02, /* field_chg */
+               0x49, 0x02, /* vact_st2 */
+               0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
+       .core = {
+               .h_blank = {0xd0, 0x02},
+               .v_blank = {0x65, 0x6c, 0x01},
+               .h_v_line = {0x65, 0x04, 0xa5},
+               .vsync_pol = {0x00},
+               .int_pro_mode = {0x00},
+               .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+               .h_sync_gen = {0x0e, 0xea, 0x08},
+               .v_sync_gen1 = {0x09, 0x40, 0x00},
+               .v_sync_gen2 = {0x01, 0x10, 0x00},
+               .v_sync_gen3 = {0x01, 0x10, 0x00},
+               /* other registers: don't care */
+       },
+       .tg = {
+               0x00, /* cmd */
+               0x50, 0x0A, /* h_fsz */
+               0xCF, 0x02, 0x81, 0x07, /* hact */
+               0x65, 0x04, /* v_fsz */
+               0x01, 0x00, 0x33, 0x02, /* vsync */
+               0x2d, 0x00, 0x38, 0x04, /* vact */
+               0x33, 0x02, /* field_chg */
+               0x48, 0x02, /* vact_st2 */
+               0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
+       .core = {
+               .h_blank = {0x18, 0x01},
+               .v_blank = {0x32, 0xB2, 0x00},
+               .h_v_line = {0x65, 0x84, 0x89},
+               .vsync_pol = {0x00},
+               .int_pro_mode = {0x01},
+               .v_blank_f = {0x49, 0x2A, 0x23},
+               .h_sync_gen = {0x56, 0x08, 0x02},
+               .v_sync_gen1 = {0x07, 0x20, 0x00},
+               .v_sync_gen2 = {0x39, 0x42, 0x23},
+               .v_sync_gen3 = {0xa4, 0x44, 0x4a},
+               /* other registers: don't care */
+       },
+       .tg = {
+               0x00, /* cmd */
+               0x98, 0x08, /* h_fsz */
+               0x17, 0x01, 0x81, 0x07, /* hact */
+               0x65, 0x04, /* v_fsz */
+               0x01, 0x00, 0x33, 0x02, /* vsync */
+               0x16, 0x00, 0x1c, 0x02, /* vact */
+               0x33, 0x02, /* field_chg */
+               0x49, 0x02, /* vact_st2 */
+               0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
+       .core = {
+               .h_blank = {0x18, 0x01},
+               .v_blank = {0x65, 0x6c, 0x01},
+               .h_v_line = {0x65, 0x84, 0x89},
+               .vsync_pol = {0x00},
+               .int_pro_mode = {0x00},
+               .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+               .h_sync_gen = {0x56, 0x08, 0x02},
+               .v_sync_gen1 = {0x09, 0x40, 0x00},
+               .v_sync_gen2 = {0x01, 0x10, 0x00},
+               .v_sync_gen3 = {0x01, 0x10, 0x00},
+               /* other registers: don't care */
+       },
+       .tg = {
+               0x00, /* cmd */
+               0x98, 0x08, /* h_fsz */
+               0x17, 0x01, 0x81, 0x07, /* hact */
+               0x65, 0x04, /* v_fsz */
+               0x01, 0x00, 0x33, 0x02, /* vsync */
+               0x2d, 0x00, 0x38, 0x04, /* vact */
+               0x33, 0x02, /* field_chg */
+               0x48, 0x02, /* vact_st2 */
+               0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+               0x01, 0x00, 0x33, 0x02, /* field top/bot */
+       },
+};
+
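+/*
+ * Supported mode table: width/height/vrefresh/interlace plus the matching
+ * PHY block and register preset, looked up by hdmi_conf_index() and
+ * hdmi_check_timing().
+ */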
+static const struct hdmi_conf hdmi_confs[] = {
+       { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+       { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+       { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p },
+       { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+       { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+       { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+       { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+};
+
+static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
+{
+       return readl(hdata->regs + reg_id);
+}
+
+static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
+                                u32 reg_id, u8 value)
+{
+       writeb(value, hdata->regs + reg_id);
+}
+
+static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
+                                u32 reg_id, u32 value, u32 mask)
+{
+       u32 old = readl(hdata->regs + reg_id);
+       value = (value & mask) | (old & ~mask);
+       writel(value, hdata->regs + reg_id);
+}
+
+static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+#define DUMPREG(reg_id) \
+       DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
+       readl(hdata->regs + reg_id))
+       DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
+       DUMPREG(HDMI_INTC_FLAG);
+       DUMPREG(HDMI_INTC_CON);
+       DUMPREG(HDMI_HPD_STATUS);
+       DUMPREG(HDMI_PHY_RSTOUT);
+       DUMPREG(HDMI_PHY_VPLL);
+       DUMPREG(HDMI_PHY_CMU);
+       DUMPREG(HDMI_CORE_RSTOUT);
+
+       DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
+       DUMPREG(HDMI_CON_0);
+       DUMPREG(HDMI_CON_1);
+       DUMPREG(HDMI_CON_2);
+       DUMPREG(HDMI_SYS_STATUS);
+       DUMPREG(HDMI_PHY_STATUS);
+       DUMPREG(HDMI_STATUS_EN);
+       DUMPREG(HDMI_HPD);
+       DUMPREG(HDMI_MODE_SEL);
+       DUMPREG(HDMI_HPD_GEN);
+       DUMPREG(HDMI_DC_CONTROL);
+       DUMPREG(HDMI_VIDEO_PATTERN_GEN);
+
+       DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+       DUMPREG(HDMI_H_BLANK_0);
+       DUMPREG(HDMI_H_BLANK_1);
+       DUMPREG(HDMI_V_BLANK_0);
+       DUMPREG(HDMI_V_BLANK_1);
+       DUMPREG(HDMI_V_BLANK_2);
+       DUMPREG(HDMI_H_V_LINE_0);
+       DUMPREG(HDMI_H_V_LINE_1);
+       DUMPREG(HDMI_H_V_LINE_2);
+       DUMPREG(HDMI_VSYNC_POL);
+       DUMPREG(HDMI_INT_PRO_MODE);
+       DUMPREG(HDMI_V_BLANK_F_0);
+       DUMPREG(HDMI_V_BLANK_F_1);
+       DUMPREG(HDMI_V_BLANK_F_2);
+       DUMPREG(HDMI_H_SYNC_GEN_0);
+       DUMPREG(HDMI_H_SYNC_GEN_1);
+       DUMPREG(HDMI_H_SYNC_GEN_2);
+       DUMPREG(HDMI_V_SYNC_GEN_1_0);
+       DUMPREG(HDMI_V_SYNC_GEN_1_1);
+       DUMPREG(HDMI_V_SYNC_GEN_1_2);
+       DUMPREG(HDMI_V_SYNC_GEN_2_0);
+       DUMPREG(HDMI_V_SYNC_GEN_2_1);
+       DUMPREG(HDMI_V_SYNC_GEN_2_2);
+       DUMPREG(HDMI_V_SYNC_GEN_3_0);
+       DUMPREG(HDMI_V_SYNC_GEN_3_1);
+       DUMPREG(HDMI_V_SYNC_GEN_3_2);
+
+       DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
+       DUMPREG(HDMI_TG_CMD);
+       DUMPREG(HDMI_TG_H_FSZ_L);
+       DUMPREG(HDMI_TG_H_FSZ_H);
+       DUMPREG(HDMI_TG_HACT_ST_L);
+       DUMPREG(HDMI_TG_HACT_ST_H);
+       DUMPREG(HDMI_TG_HACT_SZ_L);
+       DUMPREG(HDMI_TG_HACT_SZ_H);
+       DUMPREG(HDMI_TG_V_FSZ_L);
+       DUMPREG(HDMI_TG_V_FSZ_H);
+       DUMPREG(HDMI_TG_VSYNC_L);
+       DUMPREG(HDMI_TG_VSYNC_H);
+       DUMPREG(HDMI_TG_VSYNC2_L);
+       DUMPREG(HDMI_TG_VSYNC2_H);
+       DUMPREG(HDMI_TG_VACT_ST_L);
+       DUMPREG(HDMI_TG_VACT_ST_H);
+       DUMPREG(HDMI_TG_VACT_SZ_L);
+       DUMPREG(HDMI_TG_VACT_SZ_H);
+       DUMPREG(HDMI_TG_FIELD_CHG_L);
+       DUMPREG(HDMI_TG_FIELD_CHG_H);
+       DUMPREG(HDMI_TG_VACT_ST2_L);
+       DUMPREG(HDMI_TG_VACT_ST2_H);
+       DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+       DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+       DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+       DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+       DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+       DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+       DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+       DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+#undef DUMPREG
+}
+
+static int hdmi_conf_index(struct drm_display_mode *mode)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
+               if (hdmi_confs[i].width == mode->hdisplay &&
+                               hdmi_confs[i].height == mode->vdisplay &&
+                               hdmi_confs[i].vrefresh == mode->vrefresh &&
+                               hdmi_confs[i].interlace ==
+                               ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
+                                true : false))
+                       return i;
+
+       return -1;
+}
+
+static bool hdmi_is_connected(void *ctx)
+{
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+       u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
+
+       if (val)
+               return true;
+
+       return false;
+}
+
+static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
+                               u8 *edid, int len)
+{
+       struct edid *raw_edid;
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (!hdata->ddc_port)
+               return -ENODEV;
+
+       raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
+       if (raw_edid) {
+               memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
+                                       * EDID_LENGTH, len));
+               DRM_DEBUG_KMS("width[%d] x height[%d]\n",
+                               raw_edid->width_cm, raw_edid->height_cm);
+       } else {
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int hdmi_check_timing(void *ctx, void *timing)
+{
+       struct fb_videomode *check_timing = timing;
+       int i;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres,
+                       check_timing->yres, check_timing->refresh,
+                       check_timing->vmode);
+
+       for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
+               if (hdmi_confs[i].width == check_timing->xres &&
+                       hdmi_confs[i].height == check_timing->yres &&
+                       hdmi_confs[i].vrefresh == check_timing->refresh &&
+                       hdmi_confs[i].interlace ==
+                       ((check_timing->vmode & FB_VMODE_INTERLACED) ?
+                        true : false))
+                       return 0;
+
+       return -EINVAL;
+}
+
+static int hdmi_display_power_on(void *ctx, int mode)
+{
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               DRM_DEBUG_KMS("hdmi [on]\n");
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+               break;
+       case DRM_MODE_DPMS_SUSPEND:
+               break;
+       case DRM_MODE_DPMS_OFF:
+               DRM_DEBUG_KMS("hdmi [off]\n");
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static struct exynos_hdmi_display_ops display_ops = {
+       .is_connected   = hdmi_is_connected,
+       .get_edid       = hdmi_get_edid,
+       .check_timing   = hdmi_check_timing,
+       .power_on       = hdmi_display_power_on,
+};
+
+static void hdmi_conf_reset(struct hdmi_context *hdata)
+{
+       /* disable hpd handle for drm */
+       hdata->hpd_handle = false;
+
+       /* resetting HDMI core */
+       hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT,  0, HDMI_CORE_SW_RSTOUT);
+       mdelay(10);
+       hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
+       mdelay(10);
+
+       /* enable hpd handle for drm */
+       hdata->hpd_handle = true;
+}
+
+static void hdmi_conf_init(struct hdmi_context *hdata)
+{
+       /* disable hpd handle for drm */
+       hdata->hpd_handle = false;
+
+       /* enable HPD interrupts */
+       hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
+               HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+       mdelay(10);
+       hdmi_reg_writemask(hdata, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
+               HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+
+       /* choose HDMI mode */
+       hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
+               HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
+       /* disable bluescreen */
+       hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+       /* choose bluescreen color */
+       hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_0, 0x12);
+       hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_1, 0x34);
+       hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_2, 0x56);
+       /* enable AVI packet every vsync, fixes purple line problem */
+       hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
+       /* force RGB; see CEA-861-D table 7 for details */
+       hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(0), 0 << 5);
+       hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);
+
+       hdmi_reg_writeb(hdata, HDMI_SPD_CON, 0x02);
+       hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+       hdmi_reg_writeb(hdata, HDMI_ACR_CON, 0x04);
+
+       /* enable hpd handle for drm */
+       hdata->hpd_handle = true;
+}
+
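+/*
+ * Program the selected preset into the core sync and timing-generator
+ * registers, wait for the HDMIPHY PLL to report ready, reparent sclk_hdmi
+ * to the PHY clock and finally enable the HDMI block and timing generator
+ * (with field handling for interlaced modes).
+ */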
+static void hdmi_timing_apply(struct hdmi_context *hdata,
+                                const struct hdmi_preset_conf *conf)
+{
+       const struct hdmi_core_regs *core = &conf->core;
+       const struct hdmi_tg_regs *tg = &conf->tg;
+       int tries;
+
+       /* setting core registers */
+       hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
+       hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
+       hdmi_reg_writeb(hdata, HDMI_V_BLANK_0, core->v_blank[0]);
+       hdmi_reg_writeb(hdata, HDMI_V_BLANK_1, core->v_blank[1]);
+       hdmi_reg_writeb(hdata, HDMI_V_BLANK_2, core->v_blank[2]);
+       hdmi_reg_writeb(hdata, HDMI_H_V_LINE_0, core->h_v_line[0]);
+       hdmi_reg_writeb(hdata, HDMI_H_V_LINE_1, core->h_v_line[1]);
+       hdmi_reg_writeb(hdata, HDMI_H_V_LINE_2, core->h_v_line[2]);
+       hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
+       hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
+       hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_0, core->v_blank_f[0]);
+       hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_1, core->v_blank_f[1]);
+       hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_2, core->v_blank_f[2]);
+       hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]);
+       hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]);
+       hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
+       hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+       /* Timing generator registers */
+       hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
+       hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+
+       /* waiting for HDMIPHY's PLL to get to steady state */
+       for (tries = 100; tries; --tries) {
+               u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS);
+               if (val & HDMI_PHY_STATUS_READY)
+                       break;
+               mdelay(1);
+       }
+       /* steady state not achieved */
+       if (tries == 0) {
+               DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
+               hdmi_regs_dump(hdata, "timing apply");
+       }
+
+       clk_disable(hdata->res.sclk_hdmi);
+       clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy);
+       clk_enable(hdata->res.sclk_hdmi);
+
+       /* enable HDMI and timing generator */
+       hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
+       if (core->int_pro_mode[0])
+               hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
+                               HDMI_FIELD_EN);
+       else
+               hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+}
+
+static void hdmiphy_conf_reset(struct hdmi_context *hdata)
+{
+       u8 buffer[2];
+
+       clk_disable(hdata->res.sclk_hdmi);
+       clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel);
+       clk_enable(hdata->res.sclk_hdmi);
+
+       /* operation mode */
+       buffer[0] = 0x1f;
+       buffer[1] = 0x00;
+
+       if (hdata->hdmiphy_port)
+               i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+
+       /* reset hdmiphy */
+       hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
+       mdelay(10);
+       hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT,  0, HDMI_PHY_SW_RSTOUT);
+       mdelay(10);
+}
+
+static void hdmiphy_conf_apply(struct hdmi_context *hdata)
+{
+       u8 buffer[32];
+       u8 operation[2];
+       u8 read_buffer[32] = {0, };
+       int ret;
+       int i;
+
+       if (!hdata->hdmiphy_port) {
+               DRM_ERROR("hdmiphy is not attached\n");
+               return;
+       }
+
+       /* pixel clock */
+       memcpy(buffer, hdmi_confs[hdata->cur_conf].hdmiphy_data, 32);
+       ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
+       if (ret != 32) {
+               DRM_ERROR("failed to configure HDMIPHY via I2C\n");
+               return;
+       }
+
+       mdelay(10);
+
+       /* operation mode */
+       operation[0] = 0x1f;
+       operation[1] = 0x80;
+
+       ret = i2c_master_send(hdata->hdmiphy_port, operation, 2);
+       if (ret != 2) {
+               DRM_ERROR("failed to enable hdmiphy\n");
+               return;
+       }
+
+       ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32);
+       if (ret < 0) {
+               DRM_ERROR("failed to read hdmiphy config\n");
+               return;
+       }
+
+       for (i = 0; i < ret; i++)
+               DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - "
+                       "recv [0x%02x]\n", i, buffer[i], read_buffer[i]);
+}
+
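+/*
+ * Full mode-set sequence: reset and reprogram the PHY over I2C, then reset
+ * and reinitialize the HDMI core before applying the preset timings.
+ */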
+static void hdmi_conf_apply(struct hdmi_context *hdata)
+{
+       const struct hdmi_preset_conf *conf =
+                 hdmi_confs[hdata->cur_conf].conf;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       hdmiphy_conf_reset(hdata);
+       hdmiphy_conf_apply(hdata);
+
+       hdmi_conf_reset(hdata);
+       hdmi_conf_init(hdata);
+
+       /* setting core registers */
+       hdmi_timing_apply(hdata, conf);
+
+       hdmi_regs_dump(hdata, "start");
+}
+
+static void hdmi_mode_set(void *ctx, void *mode)
+{
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+       int conf_idx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       conf_idx = hdmi_conf_index(mode);
+       if (conf_idx >= 0 && conf_idx < ARRAY_SIZE(hdmi_confs))
+               hdata->cur_conf = conf_idx;
+       else
+               DRM_DEBUG_KMS("not supported mode\n");
+}
+
+static void hdmi_commit(void *ctx)
+{
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       hdmi_conf_apply(hdata);
+
+       hdata->enabled = true;
+}
+
+static void hdmi_disable(void *ctx)
+{
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (hdata->enabled) {
+               hdmiphy_conf_reset(hdata);
+               hdmi_conf_reset(hdata);
+       }
+}
+
+static struct exynos_hdmi_manager_ops manager_ops = {
+       .mode_set       = hdmi_mode_set,
+       .commit         = hdmi_commit,
+       .disable        = hdmi_disable,
+};
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void hdmi_hotplug_func(struct work_struct *work)
+{
+       struct hdmi_context *hdata =
+               container_of(work, struct hdmi_context, hotplug_work);
+       struct exynos_drm_hdmi_context *ctx =
+               (struct exynos_drm_hdmi_context *)hdata->parent_ctx;
+
+       drm_helper_hpd_irq_event(ctx->drm_dev);
+}
+
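+/*
+ * HPD interrupt: acknowledge the plug/unplug flags and defer the connector
+ * re-probe to the workqueue. Hotplug handling is suppressed (hpd_handle is
+ * false) while the core is being reset or reinitialized.
+ */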
+static irqreturn_t hdmi_irq_handler(int irq, void *arg)
+{
+       struct exynos_drm_hdmi_context *ctx = arg;
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+       u32 intc_flag;
+
+       intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
+       /* clearing flags for HPD plug/unplug */
+       if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
+               DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle);
+               hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
+                       HDMI_INTC_FLAG_HPD_UNPLUG);
+       }
+       if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
+               DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle);
+               hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
+                       HDMI_INTC_FLAG_HPD_PLUG);
+       }
+
+       if (ctx->drm_dev && hdata->hpd_handle)
+               queue_work(hdata->wq, &hdata->hotplug_work);
+
+       return IRQ_HANDLED;
+}
+
+static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
+{
+       struct device *dev = hdata->dev;
+       struct hdmi_resources *res = &hdata->res;
+       static char *supply[] = {
+               "hdmi-en",
+               "vdd",
+               "vdd_osc",
+               "vdd_pll",
+       };
+       int i, ret;
+
+       DRM_DEBUG_KMS("HDMI resource init\n");
+
+       memset(res, 0, sizeof *res);
+
+       /* get clocks, power */
+       res->hdmi = clk_get(dev, "hdmi");
+       if (IS_ERR_OR_NULL(res->hdmi)) {
+               DRM_ERROR("failed to get clock 'hdmi'\n");
+               goto fail;
+       }
+       res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+       if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+               DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
+               goto fail;
+       }
+       res->sclk_pixel = clk_get(dev, "sclk_pixel");
+       if (IS_ERR_OR_NULL(res->sclk_pixel)) {
+               DRM_ERROR("failed to get clock 'sclk_pixel'\n");
+               goto fail;
+       }
+       res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+       if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
+               DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
+               goto fail;
+       }
+       res->hdmiphy = clk_get(dev, "hdmiphy");
+       if (IS_ERR_OR_NULL(res->hdmiphy)) {
+               DRM_ERROR("failed to get clock 'hdmiphy'\n");
+               goto fail;
+       }
+
+       clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
+
+       res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+               sizeof res->regul_bulk[0], GFP_KERNEL);
+       if (!res->regul_bulk) {
+               DRM_ERROR("failed to get memory for regulators\n");
+               goto fail;
+       }
+       for (i = 0; i < ARRAY_SIZE(supply); ++i) {
+               res->regul_bulk[i].supply = supply[i];
+               res->regul_bulk[i].consumer = NULL;
+       }
+       ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+       if (ret) {
+               DRM_ERROR("failed to get regulators\n");
+               goto fail;
+       }
+       res->regul_count = ARRAY_SIZE(supply);
+
+       return 0;
+fail:
+       DRM_ERROR("HDMI resource init - failed\n");
+       return -ENODEV;
+}
+
+static int hdmi_resources_cleanup(struct hdmi_context *hdata)
+{
+       struct hdmi_resources *res = &hdata->res;
+
+       regulator_bulk_free(res->regul_count, res->regul_bulk);
+       /* kfree is NULL-safe */
+       kfree(res->regul_bulk);
+       if (!IS_ERR_OR_NULL(res->hdmiphy))
+               clk_put(res->hdmiphy);
+       if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
+               clk_put(res->sclk_hdmiphy);
+       if (!IS_ERR_OR_NULL(res->sclk_pixel))
+               clk_put(res->sclk_pixel);
+       if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+               clk_put(res->sclk_hdmi);
+       if (!IS_ERR_OR_NULL(res->hdmi))
+               clk_put(res->hdmi);
+       memset(res, 0, sizeof *res);
+
+       return 0;
+}
+
+static void hdmi_resource_poweron(struct hdmi_context *hdata)
+{
+       struct hdmi_resources *res = &hdata->res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       /* turn HDMI power on */
+       regulator_bulk_enable(res->regul_count, res->regul_bulk);
+       /* power-on hdmi physical interface */
+       clk_enable(res->hdmiphy);
+       /* turn clocks on */
+       clk_enable(res->hdmi);
+       clk_enable(res->sclk_hdmi);
+
+       hdmiphy_conf_reset(hdata);
+       hdmi_conf_reset(hdata);
+       hdmi_conf_init(hdata);
+}
+
+static void hdmi_resource_poweroff(struct hdmi_context *hdata)
+{
+       struct hdmi_resources *res = &hdata->res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       /* turn clocks off */
+       clk_disable(res->sclk_hdmi);
+       clk_disable(res->hdmi);
+       /* power-off hdmiphy */
+       clk_disable(res->hdmiphy);
+       /* turn HDMI power off */
+       regulator_bulk_disable(res->regul_count, res->regul_bulk);
+}
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       hdmi_resource_poweroff((struct hdmi_context *)ctx->ctx);
+
+       return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       hdmi_resource_poweron((struct hdmi_context *)ctx->ctx);
+
+       return 0;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+       .runtime_suspend = hdmi_runtime_suspend,
+       .runtime_resume  = hdmi_runtime_resume,
+};
+
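+/*
+ * The DDC and HDMIPHY I2C clients register themselves through the attach
+ * helpers below when their drivers are probed; hdmi_probe() picks the
+ * clients up right after adding those drivers.
+ */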
+static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
+
+void hdmi_attach_ddc_client(struct i2c_client *ddc)
+{
+       if (ddc)
+               hdmi_ddc = ddc;
+}
+EXPORT_SYMBOL(hdmi_attach_ddc_client);
+
+void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
+{
+       if (hdmiphy)
+               hdmi_hdmiphy = hdmiphy;
+}
+EXPORT_SYMBOL(hdmi_attach_hdmiphy_client);
+
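+/*
+ * hdmi_probe() maps the register bank, registers the DDC and HDMIPHY I2C
+ * drivers, sets up the hotplug workqueue and HPD interrupt, registers the
+ * display/manager callbacks with the common exynos-drm-hdmi layer and then
+ * powers the block on.
+ */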
+static int __devinit hdmi_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+       struct hdmi_context *hdata;
+       struct exynos_drm_hdmi_pdata *pdata;
+       struct resource *res;
+       int ret;
+
+       DRM_DEBUG_KMS("[%d]\n", __LINE__);
+
+       pdata = pdev->dev.platform_data;
+       if (!pdata) {
+               DRM_ERROR("no platform data specified\n");
+               return -EINVAL;
+       }
+
+       drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+       if (!drm_hdmi_ctx) {
+               DRM_ERROR("failed to allocate common hdmi context.\n");
+               return -ENOMEM;
+       }
+
+       hdata = kzalloc(sizeof(struct hdmi_context), GFP_KERNEL);
+       if (!hdata) {
+               DRM_ERROR("out of memory\n");
+               kfree(drm_hdmi_ctx);
+               return -ENOMEM;
+       }
+
+       drm_hdmi_ctx->ctx = (void *)hdata;
+       hdata->parent_ctx = (void *)drm_hdmi_ctx;
+
+       platform_set_drvdata(pdev, drm_hdmi_ctx);
+
+       hdata->default_win = pdata->default_win;
+       hdata->default_timing = &pdata->timing;
+       hdata->default_bpp = pdata->bpp;
+       hdata->dev = dev;
+
+       ret = hdmi_resources_init(hdata);
+       if (ret) {
+               ret = -EINVAL;
+               goto err_data;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               DRM_ERROR("failed to find registers\n");
+               ret = -ENOENT;
+               goto err_resource;
+       }
+
+       hdata->regs_res = request_mem_region(res->start, resource_size(res),
+                                          dev_name(dev));
+       if (!hdata->regs_res) {
+               DRM_ERROR("failed to claim register region\n");
+               ret = -ENOENT;
+               goto err_resource;
+       }
+
+       hdata->regs = ioremap(res->start, resource_size(res));
+       if (!hdata->regs) {
+               DRM_ERROR("failed to map registers\n");
+               ret = -ENXIO;
+               goto err_req_region;
+       }
+
+       /* DDC i2c driver */
+       if (i2c_add_driver(&ddc_driver)) {
+               DRM_ERROR("failed to register ddc i2c driver\n");
+               ret = -ENOENT;
+               goto err_iomap;
+       }
+
+       hdata->ddc_port = hdmi_ddc;
+
+       /* hdmiphy i2c driver */
+       if (i2c_add_driver(&hdmiphy_driver)) {
+               DRM_ERROR("failed to register hdmiphy i2c driver\n");
+               ret = -ENOENT;
+               goto err_ddc;
+       }
+
+       hdata->hdmiphy_port = hdmi_hdmiphy;
+
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (res == NULL) {
+               DRM_ERROR("get interrupt resource failed.\n");
+               ret = -ENXIO;
+               goto err_hdmiphy;
+       }
+
+       /* create workqueue and hotplug work */
+       hdata->wq = alloc_workqueue("exynos-drm-hdmi",
+                       WQ_UNBOUND | WQ_NON_REENTRANT, 1);
+       if (hdata->wq == NULL) {
+               DRM_ERROR("Failed to create workqueue.\n");
+               ret = -ENOMEM;
+               goto err_hdmiphy;
+       }
+       INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func);
+
+       /* register hpd interrupt */
+       ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi",
+                               drm_hdmi_ctx);
+       if (ret) {
+               DRM_ERROR("request interrupt failed.\n");
+               goto err_workqueue;
+       }
+       hdata->irq = res->start;
+
+       /* register specific callbacks to common hdmi. */
+       exynos_drm_display_ops_register(&display_ops);
+       exynos_drm_manager_ops_register(&manager_ops);
+
+       hdmi_resource_poweron(hdata);
+
+       return 0;
+
+err_workqueue:
+       destroy_workqueue(hdata->wq);
+err_hdmiphy:
+       i2c_del_driver(&hdmiphy_driver);
+err_ddc:
+       i2c_del_driver(&ddc_driver);
+err_iomap:
+       iounmap(hdata->regs);
+err_req_region:
+       release_resource(hdata->regs_res);
+       kfree(hdata->regs_res);
+err_resource:
+       hdmi_resources_cleanup(hdata);
+err_data:
+       kfree(hdata);
+       kfree(drm_hdmi_ctx);
+       return ret;
+}
+
+static int __devexit hdmi_remove(struct platform_device *pdev)
+{
+       struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
+       struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       hdmi_resource_poweroff(hdata);
+
+       disable_irq(hdata->irq);
+       free_irq(hdata->irq, ctx);
+
+       cancel_work_sync(&hdata->hotplug_work);
+       destroy_workqueue(hdata->wq);
+
+       hdmi_resources_cleanup(hdata);
+
+       iounmap(hdata->regs);
+
+       release_resource(hdata->regs_res);
+       kfree(hdata->regs_res);
+
+       /* hdmiphy i2c driver */
+       i2c_del_driver(&hdmiphy_driver);
+       /* DDC i2c driver */
+       i2c_del_driver(&ddc_driver);
+
+       kfree(hdata);
+
+       return 0;
+}
+
+struct platform_driver hdmi_driver = {
+       .probe          = hdmi_probe,
+       .remove         = __devexit_p(hdmi_remove),
+       .driver         = {
+               .name   = "exynos4-hdmi",
+               .owner  = THIS_MODULE,
+               .pm = &hdmi_pm_ops,
+       },
+};
+EXPORT_SYMBOL(hdmi_driver);
+
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Samsung DRM HDMI core Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
new file mode 100644 (file)
index 0000000..31d6cf8
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *     Inki Dae <inki.dae@samsung.com>
+ *     Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_HDMI_H_
+#define _EXYNOS_HDMI_H_
+
+struct hdmi_conf {
+       int width;
+       int height;
+       int vrefresh;
+       bool interlace;
+       const u8 *hdmiphy_data;
+       const struct hdmi_preset_conf *conf;
+};
+
+struct hdmi_resources {
+       struct clk *hdmi;
+       struct clk *sclk_hdmi;
+       struct clk *sclk_pixel;
+       struct clk *sclk_hdmiphy;
+       struct clk *hdmiphy;
+       struct regulator_bulk_data *regul_bulk;
+       int regul_count;
+};
+
+struct hdmi_context {
+       struct device                   *dev;
+       struct drm_device               *drm_dev;
+       struct fb_videomode             *default_timing;
+       unsigned int                    default_win;
+       unsigned int                    default_bpp;
+       bool                            hpd_handle;
+       bool                            enabled;
+
+       struct resource                 *regs_res;
+       /** base address of HDMI registers */
+       void __iomem *regs;
+       /** HDMI hotplug interrupt */
+       unsigned int irq;
+       /** workqueue for delayed work */
+       struct workqueue_struct *wq;
+       /** hotplug handling work */
+       struct work_struct hotplug_work;
+
+       struct i2c_client *ddc_port;
+       struct i2c_client *hdmiphy_port;
+
+       /** current hdmiphy conf index */
+       int cur_conf;
+       /** other resources */
+       struct hdmi_resources res;
+
+       void *parent_ctx;
+};
+
+
+void hdmi_attach_ddc_client(struct i2c_client *ddc);
+void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy);
+
+extern struct i2c_driver hdmiphy_driver;
+extern struct i2c_driver ddc_driver;
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
new file mode 100644 (file)
index 0000000..9fe2995
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Seung-Woo Kim <sw0312.kim@samsung.com>
+ *     Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+
+static int hdmiphy_probe(struct i2c_client *client,
+       const struct i2c_device_id *id)
+{
+       hdmi_attach_hdmiphy_client(client);
+
+       dev_info(&client->adapter->dev, "attached s5p_hdmiphy "
+               "to i2c adapter successfully\n");
+
+       return 0;
+}
+
+static int hdmiphy_remove(struct i2c_client *client)
+{
+       dev_info(&client->adapter->dev, "detached s5p_hdmiphy "
+               "from i2c adapter successfully\n");
+
+       return 0;
+}
+
+static const struct i2c_device_id hdmiphy_id[] = {
+       { "s5p_hdmiphy", 0 },
+       { },
+};
+
+struct i2c_driver hdmiphy_driver = {
+       .driver = {
+               .name   = "s5p-hdmiphy",
+               .owner  = THIS_MODULE,
+       },
+       .id_table = hdmiphy_id,
+       .probe          = hdmiphy_probe,
+       .remove         = __devexit_p(hdmiphy_remove),
+       .command                = NULL,
+};
+EXPORT_SYMBOL(hdmiphy_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
new file mode 100644 (file)
index 0000000..ac24cff
--- /dev/null
@@ -0,0 +1,1070 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *     Inki Dae <inki.dae@samsung.com>
+ *     Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/mixer_reg.c
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include "regs-mixer.h"
+#include "regs-vp.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+#include "exynos_hdmi.h"
+#include "exynos_mixer.h"
+
+#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
+
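+/* default polyphase scaler coefficient tables for the video processor,
+ * loaded into the VP_POLY* registers by vp_default_filter()
+ */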
+static const u8 filter_y_horiz_tap8[] = {
+       0,      -1,     -1,     -1,     -1,     -1,     -1,     -1,
+       -1,     -1,     -1,     -1,     -1,     0,      0,      0,
+       0,      2,      4,      5,      6,      6,      6,      6,
+       6,      5,      5,      4,      3,      2,      1,      1,
+       0,      -6,     -12,    -16,    -18,    -20,    -21,    -20,
+       -20,    -18,    -16,    -13,    -10,    -8,     -5,     -2,
+       127,    126,    125,    121,    114,    107,    99,     89,
+       79,     68,     57,     46,     35,     25,     16,     8,
+};
+
+static const u8 filter_y_vert_tap4[] = {
+       0,      -3,     -6,     -8,     -8,     -8,     -8,     -7,
+       -6,     -5,     -4,     -3,     -2,     -1,     -1,     0,
+       127,    126,    124,    118,    111,    102,    92,     81,
+       70,     59,     48,     37,     27,     19,     11,     5,
+       0,      5,      11,     19,     27,     37,     48,     59,
+       70,     81,     92,     102,    111,    118,    124,    126,
+       0,      0,      -1,     -1,     -2,     -3,     -4,     -5,
+       -6,     -7,     -8,     -8,     -8,     -8,     -6,     -3,
+};
+
+static const u8 filter_cr_horiz_tap4[] = {
+       0,      -3,     -6,     -8,     -8,     -8,     -8,     -7,
+       -6,     -5,     -4,     -3,     -2,     -1,     -1,     0,
+       127,    126,    124,    118,    111,    102,    92,     81,
+       70,     59,     48,     37,     27,     19,     11,     5,
+};
+
+static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+       return readl(res->vp_regs + reg_id);
+}
+
+static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
+                                u32 val)
+{
+       writel(val, res->vp_regs + reg_id);
+}
+
+static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
+                                u32 val, u32 mask)
+{
+       u32 old = vp_reg_read(res, reg_id);
+
+       val = (val & mask) | (old & ~mask);
+       writel(val, res->vp_regs + reg_id);
+}
+
+static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+       return readl(res->mixer_regs + reg_id);
+}
+
+static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
+                                u32 val)
+{
+       writel(val, res->mixer_regs + reg_id);
+}
+
+static inline void mixer_reg_writemask(struct mixer_resources *res,
+                                u32 reg_id, u32 val, u32 mask)
+{
+       u32 old = mixer_reg_read(res, reg_id);
+
+       val = (val & mask) | (old & ~mask);
+       writel(val, res->mixer_regs + reg_id);
+}
+
+static void mixer_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+       DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+               (u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
+} while (0)
+
+       DUMPREG(MXR_STATUS);
+       DUMPREG(MXR_CFG);
+       DUMPREG(MXR_INT_EN);
+       DUMPREG(MXR_INT_STATUS);
+
+       DUMPREG(MXR_LAYER_CFG);
+       DUMPREG(MXR_VIDEO_CFG);
+
+       DUMPREG(MXR_GRAPHIC0_CFG);
+       DUMPREG(MXR_GRAPHIC0_BASE);
+       DUMPREG(MXR_GRAPHIC0_SPAN);
+       DUMPREG(MXR_GRAPHIC0_WH);
+       DUMPREG(MXR_GRAPHIC0_SXY);
+       DUMPREG(MXR_GRAPHIC0_DXY);
+
+       DUMPREG(MXR_GRAPHIC1_CFG);
+       DUMPREG(MXR_GRAPHIC1_BASE);
+       DUMPREG(MXR_GRAPHIC1_SPAN);
+       DUMPREG(MXR_GRAPHIC1_WH);
+       DUMPREG(MXR_GRAPHIC1_SXY);
+       DUMPREG(MXR_GRAPHIC1_DXY);
+#undef DUMPREG
+}
+
+static void vp_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+       DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+               (u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
+} while (0)
+
+       DUMPREG(VP_ENABLE);
+       DUMPREG(VP_SRESET);
+       DUMPREG(VP_SHADOW_UPDATE);
+       DUMPREG(VP_FIELD_ID);
+       DUMPREG(VP_MODE);
+       DUMPREG(VP_IMG_SIZE_Y);
+       DUMPREG(VP_IMG_SIZE_C);
+       DUMPREG(VP_PER_RATE_CTRL);
+       DUMPREG(VP_TOP_Y_PTR);
+       DUMPREG(VP_BOT_Y_PTR);
+       DUMPREG(VP_TOP_C_PTR);
+       DUMPREG(VP_BOT_C_PTR);
+       DUMPREG(VP_ENDIAN_MODE);
+       DUMPREG(VP_SRC_H_POSITION);
+       DUMPREG(VP_SRC_V_POSITION);
+       DUMPREG(VP_SRC_WIDTH);
+       DUMPREG(VP_SRC_HEIGHT);
+       DUMPREG(VP_DST_H_POSITION);
+       DUMPREG(VP_DST_V_POSITION);
+       DUMPREG(VP_DST_WIDTH);
+       DUMPREG(VP_DST_HEIGHT);
+       DUMPREG(VP_H_RATIO);
+       DUMPREG(VP_V_RATIO);
+
+#undef DUMPREG
+}
+
+static inline void vp_filter_set(struct mixer_resources *res,
+               int reg_id, const u8 *data, unsigned int size)
+{
+       /* ensure the table size is a multiple of 4 bytes */
+       BUG_ON(size & 3);
+       for (; size; size -= 4, reg_id += 4, data += 4) {
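+               /* pack four coefficients MSB-first into one register word */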
+               u32 val = (data[0] << 24) |  (data[1] << 16) |
+                       (data[2] << 8) | data[3];
+               vp_reg_write(res, reg_id, val);
+       }
+}
+
+static void vp_default_filter(struct mixer_resources *res)
+{
+       vp_filter_set(res, VP_POLY8_Y0_LL,
+               filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
+       vp_filter_set(res, VP_POLY4_Y0_LL,
+               filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
+       vp_filter_set(res, VP_POLY4_C0_LL,
+               filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
+}
+
+static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+
+       /* when enabled, defer register updates until vsync; otherwise apply them immediately */
+       mixer_reg_writemask(res, MXR_STATUS, enable ?
+                       MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
+
+       vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
+                       VP_SHADOW_UPDATE_ENABLE : 0);
+}
+
+static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       u32 val;
+
+       /* choosing between interlace and progressive mode */
+       val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
+                               MXR_CFG_SCAN_PROGRASSIVE);
+
+       /* choosing between proper HD and SD mode */
+       if (height == 480)
+               val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
+       else if (height == 576)
+               val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
+       else if (height == 720)
+               val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+       else if (height == 1080)
+               val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
+       else
+               val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+
+       mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
+}
+
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       u32 val;
+
+       if (height == 480) {
+               val = MXR_CFG_RGB601_0_255;
+       } else if (height == 576) {
+               val = MXR_CFG_RGB601_0_255;
+       } else if (height == 720) {
+               val = MXR_CFG_RGB709_16_235;
+               mixer_reg_write(res, MXR_CM_COEFF_Y,
+                               (1 << 30) | (94 << 20) | (314 << 10) |
+                               (32 << 0));
+               mixer_reg_write(res, MXR_CM_COEFF_CB,
+                               (972 << 20) | (851 << 10) | (225 << 0));
+               mixer_reg_write(res, MXR_CM_COEFF_CR,
+                               (225 << 20) | (820 << 10) | (1004 << 0));
+       } else if (height == 1080) {
+               val = MXR_CFG_RGB709_16_235;
+               mixer_reg_write(res, MXR_CM_COEFF_Y,
+                               (1 << 30) | (94 << 20) | (314 << 10) |
+                               (32 << 0));
+               mixer_reg_write(res, MXR_CM_COEFF_CB,
+                               (972 << 20) | (851 << 10) | (225 << 0));
+               mixer_reg_write(res, MXR_CM_COEFF_CR,
+                               (225 << 20) | (820 << 10) | (1004 << 0));
+       } else {
+               val = MXR_CFG_RGB709_16_235;
+               mixer_reg_write(res, MXR_CM_COEFF_Y,
+                               (1 << 30) | (94 << 20) | (314 << 10) |
+                               (32 << 0));
+               mixer_reg_write(res, MXR_CM_COEFF_CB,
+                               (972 << 20) | (851 << 10) | (225 << 0));
+               mixer_reg_write(res, MXR_CM_COEFF_CR,
+                               (225 << 20) | (820 << 10) | (1004 << 0));
+       }
+
+       mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
+}
+
+static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       u32 val = enable ? ~0 : 0;
+
+       switch (win) {
+       case 0:
+               mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
+               break;
+       case 1:
+               mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
+               break;
+       case 2:
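+               /* layer 2 is the video processor (VP) plane */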
+               vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
+               mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE);
+               break;
+       }
+}
+
+static void mixer_run(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+
+       mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+
+       mixer_regs_dump(ctx);
+}
+
+static void vp_video_buffer(struct mixer_context *ctx, int win)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       unsigned long flags;
+       struct hdmi_win_data *win_data;
+       unsigned int full_width, full_height, width, height;
+       unsigned int x_ratio, y_ratio;
+       unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+       unsigned int mode_width, mode_height;
+       unsigned int buf_num;
+       dma_addr_t luma_addr[2], chroma_addr[2];
+       bool tiled_mode = false;
+       bool crcb_mode = false;
+       u32 val;
+
+       win_data = &ctx->win_data[win];
+
+       switch (win_data->pixel_format) {
+       case DRM_FORMAT_NV12MT:
+               tiled_mode = true;
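+               /* fall through - NV12MT uses the same two-plane setup as NV12M */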
+       case DRM_FORMAT_NV12M:
+               crcb_mode = false;
+               buf_num = 2;
+               break;
+       /* TODO: single buffer format NV12, NV21 */
+       default:
+               /* ignore pixel format at disable time */
+               if (!win_data->dma_addr)
+                       break;
+
+               DRM_ERROR("pixel format for vp is wrong [%d].\n",
+                               win_data->pixel_format);
+               return;
+       }
+
+       full_width = win_data->fb_width;
+       full_height = win_data->fb_height;
+       width = win_data->crtc_width;
+       height = win_data->crtc_height;
+       mode_width = win_data->mode_width;
+       mode_height = win_data->mode_height;
+
+       /* scaling ratio: (src << 16) / dst; source and destination sizes
+        * are equal here, so the ratio is fixed at 1:1
+        */
+       x_ratio = (width << 16) / width;
+       y_ratio = (height << 16) / height;
+
+       src_x_offset = win_data->fb_x;
+       src_y_offset = win_data->fb_y;
+       dst_x_offset = win_data->crtc_x;
+       dst_y_offset = win_data->crtc_y;
+
+       if (buf_num == 2) {
+               luma_addr[0] = win_data->dma_addr;
+               chroma_addr[0] = win_data->chroma_dma_addr;
+       } else {
+               luma_addr[0] = win_data->dma_addr;
+               chroma_addr[0] = win_data->dma_addr
+                       + (full_width * full_height);
+       }
+
+       if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
+               ctx->interlace = true;
+               if (tiled_mode) {
+                       luma_addr[1] = luma_addr[0] + 0x40;
+                       chroma_addr[1] = chroma_addr[0] + 0x40;
+               } else {
+                       luma_addr[1] = luma_addr[0] + full_width;
+                       chroma_addr[1] = chroma_addr[0] + full_width;
+               }
+       } else {
+               ctx->interlace = false;
+               luma_addr[1] = 0;
+               chroma_addr[1] = 0;
+       }
+
+       spin_lock_irqsave(&res->reg_slock, flags);
+       mixer_vsync_set_update(ctx, false);
+
+       /* interlace or progressive scan mode */
+       val = (ctx->interlace ? ~0 : 0);
+       vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
+
+       /* setup format */
+       val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
+       val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
+       vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
+
+       /* setting size of input image */
+       vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(full_width) |
+               VP_IMG_VSIZE(full_height));
+       /* chroma height has to be halved to avoid chroma distortions */
+       vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(full_width) |
+               VP_IMG_VSIZE(full_height / 2));
+
+       vp_reg_write(res, VP_SRC_WIDTH, width);
+       vp_reg_write(res, VP_SRC_HEIGHT, height);
+       vp_reg_write(res, VP_SRC_H_POSITION,
+                       VP_SRC_H_POSITION_VAL(src_x_offset));
+       vp_reg_write(res, VP_SRC_V_POSITION, src_y_offset);
+
+       vp_reg_write(res, VP_DST_WIDTH, width);
+       vp_reg_write(res, VP_DST_H_POSITION, dst_x_offset);
+       if (ctx->interlace) {
+               vp_reg_write(res, VP_DST_HEIGHT, height / 2);
+               vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset / 2);
+       } else {
+               vp_reg_write(res, VP_DST_HEIGHT, height);
+               vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset);
+       }
+
+       vp_reg_write(res, VP_H_RATIO, x_ratio);
+       vp_reg_write(res, VP_V_RATIO, y_ratio);
+
+       vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
+
+       /* set buffer address to vp */
+       vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
+       vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
+       vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
+       vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
+
+       mixer_cfg_scan(ctx, mode_height);
+       mixer_cfg_rgb_fmt(ctx, mode_height);
+       mixer_cfg_layer(ctx, win, true);
+       mixer_run(ctx);
+
+       mixer_vsync_set_update(ctx, true);
+       spin_unlock_irqrestore(&res->reg_slock, flags);
+
+       vp_regs_dump(ctx);
+}
+
+static void mixer_graph_buffer(struct mixer_context *ctx, int win)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       unsigned long flags;
+       struct hdmi_win_data *win_data;
+       unsigned int full_width, width, height;
+       unsigned int x_ratio, y_ratio;
+       unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+       unsigned int mode_width, mode_height;
+       dma_addr_t dma_addr;
+       unsigned int fmt;
+       u32 val;
+
+       win_data = &ctx->win_data[win];
+
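+       /* graphics layer pixel format codes for the MXR_GRP_CFG format field */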
+       #define RGB565 4
+       #define ARGB1555 5
+       #define ARGB4444 6
+       #define ARGB8888 7
+
+       switch (win_data->bpp) {
+       case 16:
+               fmt = ARGB4444;
+               break;
+       case 32:
+               fmt = ARGB8888;
+               break;
+       default:
+               fmt = ARGB8888;
+       }
+
+       dma_addr = win_data->dma_addr;
+       full_width = win_data->fb_width;
+       width = win_data->crtc_width;
+       height = win_data->crtc_height;
+       mode_width = win_data->mode_width;
+       mode_height = win_data->mode_height;
+
+       /* 2x scaling feature is not used here (0 = 1:1) */
+       x_ratio = 0;
+       y_ratio = 0;
+
+       src_x_offset = win_data->fb_x;
+       src_y_offset = win_data->fb_y;
+       dst_x_offset = win_data->crtc_x;
+       dst_y_offset = win_data->crtc_y;
+
+       /* converting dma address base and source offset */
+       dma_addr = dma_addr
+               + (src_x_offset * win_data->bpp >> 3)
+               + (src_y_offset * full_width * win_data->bpp >> 3);
+       src_x_offset = 0;
+       src_y_offset = 0;
+
+       if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
+               ctx->interlace = true;
+       else
+               ctx->interlace = false;
+
+       spin_lock_irqsave(&res->reg_slock, flags);
+       mixer_vsync_set_update(ctx, false);
+
+       /* setup format */
+       mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
+               MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
+
+       /* setup geometry */
+       mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), full_width);
+
+       val  = MXR_GRP_WH_WIDTH(width);
+       val |= MXR_GRP_WH_HEIGHT(height);
+       val |= MXR_GRP_WH_H_SCALE(x_ratio);
+       val |= MXR_GRP_WH_V_SCALE(y_ratio);
+       mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
+
+       /* setup offsets in source image */
+       val  = MXR_GRP_SXY_SX(src_x_offset);
+       val |= MXR_GRP_SXY_SY(src_y_offset);
+       mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
+
+       /* setup offsets in display image */
+       val  = MXR_GRP_DXY_DX(dst_x_offset);
+       val |= MXR_GRP_DXY_DY(dst_y_offset);
+       mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);
+
+       /* set buffer address to mixer */
+       mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
+
+       mixer_cfg_scan(ctx, mode_height);
+       mixer_cfg_rgb_fmt(ctx, mode_height);
+       mixer_cfg_layer(ctx, win, true);
+       mixer_run(ctx);
+
+       mixer_vsync_set_update(ctx, true);
+       spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static void vp_win_reset(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       int tries = 100;
+
+       vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
+       for (tries = 100; tries; --tries) {
+               /* waiting until VP_SRESET_PROCESSING is 0 */
+               if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
+                       break;
+               mdelay(10);
+       }
+       WARN(tries == 0, "failed to reset Video Processor\n");
+}
+
+static int mixer_enable_vblank(void *ctx, int pipe)
+{
+       struct mixer_context *mixer_ctx = ctx;
+       struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       mixer_ctx->pipe = pipe;
+
+       /* enable vsync interrupt */
+       mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
+                       MXR_INT_EN_VSYNC);
+
+       return 0;
+}
+
+static void mixer_disable_vblank(void *ctx)
+{
+       struct mixer_context *mixer_ctx = ctx;
+       struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       /* disable vsync interrupt */
+       mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
+}
+
+static void mixer_win_mode_set(void *ctx,
+                             struct exynos_drm_overlay *overlay)
+{
+       struct mixer_context *mixer_ctx = ctx;
+       struct hdmi_win_data *win_data;
+       int win;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (!overlay) {
+               DRM_ERROR("overlay is NULL\n");
+               return;
+       }
+
+       DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
+                                overlay->fb_width, overlay->fb_height,
+                                overlay->fb_x, overlay->fb_y,
+                                overlay->crtc_width, overlay->crtc_height,
+                                overlay->crtc_x, overlay->crtc_y);
+
+       win = overlay->zpos;
+       if (win == DEFAULT_ZPOS)
+               win = mixer_ctx->default_win;
+
+       if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+               DRM_ERROR("overlay plane[%d] is wrong\n", win);
+               return;
+       }
+
+       win_data = &mixer_ctx->win_data[win];
+
+       win_data->dma_addr = overlay->dma_addr[0];
+       win_data->vaddr = overlay->vaddr[0];
+       win_data->chroma_dma_addr = overlay->dma_addr[1];
+       win_data->chroma_vaddr = overlay->vaddr[1];
+       win_data->pixel_format = overlay->pixel_format;
+       win_data->bpp = overlay->bpp;
+
+       win_data->crtc_x = overlay->crtc_x;
+       win_data->crtc_y = overlay->crtc_y;
+       win_data->crtc_width = overlay->crtc_width;
+       win_data->crtc_height = overlay->crtc_height;
+
+       win_data->fb_x = overlay->fb_x;
+       win_data->fb_y = overlay->fb_y;
+       win_data->fb_width = overlay->fb_width;
+       win_data->fb_height = overlay->fb_height;
+
+       win_data->mode_width = overlay->mode_width;
+       win_data->mode_height = overlay->mode_height;
+
+       win_data->scan_flags = overlay->scan_flag;
+}
+
+static void mixer_win_commit(void *ctx, int zpos)
+{
+       struct mixer_context *mixer_ctx = ctx;
+       int win = zpos;
+
+       DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+
+       if (win == DEFAULT_ZPOS)
+               win = mixer_ctx->default_win;
+
+       if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+               DRM_ERROR("overlay plane[%d] is wrong\n", win);
+               return;
+       }
+
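+       /* window 2 is the VP video plane, windows 0 and 1 are graphics layers */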
+       if (win > 1)
+               vp_video_buffer(mixer_ctx, win);
+       else
+               mixer_graph_buffer(mixer_ctx, win);
+}
+
+static void mixer_win_disable(void *ctx, int zpos)
+{
+       struct mixer_context *mixer_ctx = ctx;
+       struct mixer_resources *res = &mixer_ctx->mixer_res;
+       unsigned long flags;
+       int win = zpos;
+
+       DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+
+       if (win == DEFAULT_ZPOS)
+               win = mixer_ctx->default_win;
+
+       if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+               DRM_ERROR("overlay plane[%d] is wrong\n", win);
+               return;
+       }
+
+       spin_lock_irqsave(&res->reg_slock, flags);
+       mixer_vsync_set_update(mixer_ctx, false);
+
+       mixer_cfg_layer(mixer_ctx, win, false);
+
+       mixer_vsync_set_update(mixer_ctx, true);
+       spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static struct exynos_hdmi_overlay_ops overlay_ops = {
+       .enable_vblank          = mixer_enable_vblank,
+       .disable_vblank         = mixer_disable_vblank,
+       .win_mode_set           = mixer_win_mode_set,
+       .win_commit             = mixer_win_commit,
+       .win_disable            = mixer_win_disable,
+};
+
+/* for pageflip event */
+static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
+{
+       struct exynos_drm_private *dev_priv = drm_dev->dev_private;
+       struct drm_pending_vblank_event *e, *t;
+       struct timeval now;
+       unsigned long flags;
+       bool is_checked = false;
+
+       spin_lock_irqsave(&drm_dev->event_lock, flags);
+
+       list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+                       base.link) {
+               /* if the event's pipe isn't the same as this crtc, ignore it */
+               if (crtc != e->pipe)
+                       continue;
+
+               is_checked = true;
+               do_gettimeofday(&now);
+               e->event.sequence = 0;
+               e->event.tv_sec = now.tv_sec;
+               e->event.tv_usec = now.tv_usec;
+
+               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+               wake_up_interruptible(&e->base.file_priv->event_wait);
+       }
+
+       if (is_checked)
+               drm_vblank_put(drm_dev, crtc);
+
+       spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+static irqreturn_t mixer_irq_handler(int irq, void *arg)
+{
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
+       struct mixer_context *ctx =
+                       (struct mixer_context *)drm_hdmi_ctx->ctx;
+       struct mixer_resources *res = &ctx->mixer_res;
+       u32 val, val_base;
+
+       spin_lock(&res->reg_slock);
+
+       /* read interrupt status for handling and clearing flags for VSYNC */
+       val = mixer_reg_read(res, MXR_INT_STATUS);
+
+       /* handling VSYNC */
+       if (val & MXR_INT_STATUS_VSYNC) {
+               /* interlaced scan needs to check the shadow registers */
+               if (ctx->interlace) {
+                       val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+                       if (ctx->win_data[0].dma_addr != val_base)
+                               goto out;
+
+                       val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+                       if (ctx->win_data[1].dma_addr != val_base)
+                               goto out;
+               }
+
+               drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
+               mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+       }
+
+out:
+       /* clear interrupts */
+       if (~val & MXR_INT_EN_VSYNC) {
+               /* the vsync interrupt uses different bits for status and clear */
+               val &= ~MXR_INT_EN_VSYNC;
+               val |= MXR_INT_CLEAR_VSYNC;
+       }
+       mixer_reg_write(res, MXR_INT_STATUS, val);
+
+       spin_unlock(&res->reg_slock);
+
+       return IRQ_HANDLED;
+}
+
+static void mixer_win_reset(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       unsigned long flags;
+       u32 val; /* value stored to register */
+
+       spin_lock_irqsave(&res->reg_slock, flags);
+       mixer_vsync_set_update(ctx, false);
+
+       mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
+
+       /* set output in RGB888 mode */
+       mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
+
+       /* 16 beat burst in DMA */
+       mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
+               MXR_STATUS_BURST_MASK);
+
+       /* setting default layer priority: layer1 > video > layer0
+        * because typical usage scenario would be
+        * layer0 - framebuffer
+        * video - video overlay
+        * layer1 - OSD
+        */
+       val  = MXR_LAYER_CFG_GRP0_VAL(1);
+       val |= MXR_LAYER_CFG_VP_VAL(2);
+       val |= MXR_LAYER_CFG_GRP1_VAL(3);
+       mixer_reg_write(res, MXR_LAYER_CFG, val);
+
+       /* setting background color */
+       mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
+       mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
+       mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
+
+       /* setting graphical layers */
+
+       val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no color key */
+       val |= MXR_GRP_CFG_WIN_BLEND_EN;
+       val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+       /* base configuration for layer 0; layer 1 adds per-pixel blending below */
+       mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
+
+       val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+       val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+       mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+
+       /* configuration of Video Processor Registers */
+       vp_win_reset(ctx);
+       vp_default_filter(res);
+
+       /* disable all layers */
+       mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
+       mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
+       mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+
+       mixer_vsync_set_update(ctx, true);
+       spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static void mixer_resource_poweron(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       clk_enable(res->mixer);
+       clk_enable(res->vp);
+       clk_enable(res->sclk_mixer);
+
+       mixer_win_reset(ctx);
+}
+
+static void mixer_resource_poweroff(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       clk_disable(res->mixer);
+       clk_disable(res->vp);
+       clk_disable(res->sclk_mixer);
+}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
+
+       DRM_DEBUG_KMS("resume - start\n");
+
+       mixer_resource_poweron((struct mixer_context *)ctx->ctx);
+
+       return 0;
+}
+
+static int mixer_runtime_suspend(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
+
+       DRM_DEBUG_KMS("suspend - start\n");
+
+       mixer_resource_poweroff((struct mixer_context *)ctx->ctx);
+
+       return 0;
+}
+
+static const struct dev_pm_ops mixer_pm_ops = {
+       .runtime_suspend = mixer_runtime_suspend,
+       .runtime_resume  = mixer_runtime_resume,
+};
+
+static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
+                                struct platform_device *pdev)
+{
+       struct mixer_context *mixer_ctx =
+                       (struct mixer_context *)ctx->ctx;
+       struct device *dev = &pdev->dev;
+       struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+       struct resource *res;
+       int ret;
+
+       mixer_res->dev = dev;
+       spin_lock_init(&mixer_res->reg_slock);
+
+       mixer_res->mixer = clk_get(dev, "mixer");
+       if (IS_ERR_OR_NULL(mixer_res->mixer)) {
+               dev_err(dev, "failed to get clock 'mixer'\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+       mixer_res->vp = clk_get(dev, "vp");
+       if (IS_ERR_OR_NULL(mixer_res->vp)) {
+               dev_err(dev, "failed to get clock 'vp'\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+       mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+       if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
+               dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+       mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+       if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
+               dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+       mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+       if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
+               dev_err(dev, "failed to get clock 'sclk_dac'\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
+       if (res == NULL) {
+               dev_err(dev, "get memory resource failed.\n");
+               ret = -ENXIO;
+               goto fail;
+       }
+
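+       /* derive sclk_mixer from sclk_hdmi */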
+       clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+
+       mixer_res->mixer_regs = ioremap(res->start, resource_size(res));
+       if (mixer_res->mixer_regs == NULL) {
+               dev_err(dev, "register mapping failed.\n");
+               ret = -ENXIO;
+               goto fail;
+       }
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
+       if (res == NULL) {
+               dev_err(dev, "get memory resource failed.\n");
+               ret = -ENXIO;
+               goto fail_mixer_regs;
+       }
+
+       mixer_res->vp_regs = ioremap(res->start, resource_size(res));
+       if (mixer_res->vp_regs == NULL) {
+               dev_err(dev, "register mapping failed.\n");
+               ret = -ENXIO;
+               goto fail_mixer_regs;
+       }
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
+       if (res == NULL) {
+               dev_err(dev, "get interrupt resource failed.\n");
+               ret = -ENXIO;
+               goto fail_vp_regs;
+       }
+
+       ret = request_irq(res->start, mixer_irq_handler, 0, "drm_mixer", ctx);
+       if (ret) {
+               dev_err(dev, "request interrupt failed.\n");
+               goto fail_vp_regs;
+       }
+       mixer_res->irq = res->start;
+
+       return 0;
+
+fail_vp_regs:
+       iounmap(mixer_res->vp_regs);
+
+fail_mixer_regs:
+       iounmap(mixer_res->mixer_regs);
+
+fail:
+       if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
+               clk_put(mixer_res->sclk_dac);
+       if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
+               clk_put(mixer_res->sclk_hdmi);
+       if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
+               clk_put(mixer_res->sclk_mixer);
+       if (!IS_ERR_OR_NULL(mixer_res->vp))
+               clk_put(mixer_res->vp);
+       if (!IS_ERR_OR_NULL(mixer_res->mixer))
+               clk_put(mixer_res->mixer);
+       mixer_res->dev = NULL;
+       return ret;
+}
+
+static void mixer_resources_cleanup(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+
+       disable_irq(res->irq);
+       free_irq(res->irq, ctx);
+
+       iounmap(res->vp_regs);
+       iounmap(res->mixer_regs);
+}
+
+static int __devinit mixer_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+       struct mixer_context *ctx;
+       int ret;
+
+       dev_info(dev, "probe start\n");
+
+       drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+       if (!drm_hdmi_ctx) {
+               DRM_ERROR("failed to allocate common hdmi context.\n");
+               return -ENOMEM;
+       }
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx) {
+               DRM_ERROR("failed to alloc mixer context.\n");
+               kfree(drm_hdmi_ctx);
+               return -ENOMEM;
+       }
+
+       drm_hdmi_ctx->ctx = (void *)ctx;
+
+       platform_set_drvdata(pdev, drm_hdmi_ctx);
+
+       /* acquire resources: regs, irqs, clocks */
+       ret = mixer_resources_init(drm_hdmi_ctx, pdev);
+       if (ret)
+               goto fail;
+
+       /* register specific callback point to common hdmi. */
+       exynos_drm_overlay_ops_register(&overlay_ops);
+
+       mixer_resource_poweron(ctx);
+
+       return 0;
+
+
+fail:
+       /* mixer_resources_init() cleans up after itself, so only free the contexts */
+       kfree(ctx);
+       kfree(drm_hdmi_ctx);
+       dev_info(dev, "probe failed\n");
+       return ret;
+}
+
+static int mixer_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx =
+                                       platform_get_drvdata(pdev);
+       struct mixer_context *ctx = (struct mixer_context *)drm_hdmi_ctx->ctx;
+
+       dev_info(dev, "remove sucessful\n");
+
+       mixer_resource_poweroff(ctx);
+       mixer_resources_cleanup(ctx);
+
+       return 0;
+}
+
+struct platform_driver mixer_driver = {
+       .driver = {
+               .name = "s5p-mixer",
+               .owner = THIS_MODULE,
+               .pm = &mixer_pm_ops,
+       },
+       .probe = mixer_probe,
+       .remove = __devexit_p(mixer_remove),
+};
+EXPORT_SYMBOL(mixer_driver);
+
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Samsung DRM HDMI mixer Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h
new file mode 100644 (file)
index 0000000..cebacfe
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *     Seung-Woo Kim <sw0312.kim@samsung.com>
+ *     Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_MIXER_H_
+#define _EXYNOS_MIXER_H_
+
+#define HDMI_OVERLAY_NUMBER    3
+
+struct hdmi_win_data {
+       dma_addr_t              dma_addr;
+       void __iomem            *vaddr;
+       dma_addr_t              chroma_dma_addr;
+       void __iomem            *chroma_vaddr;
+       uint32_t                pixel_format;
+       unsigned int            bpp;
+       unsigned int            crtc_x;
+       unsigned int            crtc_y;
+       unsigned int            crtc_width;
+       unsigned int            crtc_height;
+       unsigned int            fb_x;
+       unsigned int            fb_y;
+       unsigned int            fb_width;
+       unsigned int            fb_height;
+       unsigned int            mode_width;
+       unsigned int            mode_height;
+       unsigned int            scan_flags;
+};
+
+struct mixer_resources {
+       struct device *dev;
+       /** interrupt index */
+       int irq;
+       /** pointer to Mixer registers */
+       void __iomem *mixer_regs;
+       /** pointer to Video Processor registers */
+       void __iomem *vp_regs;
+       /** spinlock for protection of registers */
+       spinlock_t reg_slock;
+       /** other resources */
+       struct clk *mixer;
+       struct clk *vp;
+       struct clk *sclk_mixer;
+       struct clk *sclk_hdmi;
+       struct clk *sclk_dac;
+};
+
+struct mixer_context {
+       unsigned int                    default_win;
+       struct fb_videomode             *default_timing;
+       unsigned int                    default_bpp;
+
+       /** mixer interrupt */
+       unsigned int irq;
+       /** current crtc pipe for vblank */
+       int pipe;
+       /** interlace scan mode */
+       bool interlace;
+       /** vp enabled status */
+       bool vp_enabled;
+
+       /** mixer and vp resources */
+       struct mixer_resources mixer_res;
+
+       /** overlay window data */
+       struct hdmi_win_data            win_data[HDMI_OVERLAY_NUMBER];
+};
+
+#endif
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
new file mode 100644 (file)
index 0000000..72e6b52
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ *
+ *  Cloned from drivers/media/video/s5p-tv/regs-hdmi.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * HDMI register header file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef SAMSUNG_REGS_HDMI_H
+#define SAMSUNG_REGS_HDMI_H
+
+/*
+ * Register part
+*/
+
+#define HDMI_CTRL_BASE(x)              ((x) + 0x00000000)
+#define HDMI_CORE_BASE(x)              ((x) + 0x00010000)
+#define HDMI_TG_BASE(x)                        ((x) + 0x00050000)
+
+/* Control registers */
+#define HDMI_INTC_CON                  HDMI_CTRL_BASE(0x0000)
+#define HDMI_INTC_FLAG                 HDMI_CTRL_BASE(0x0004)
+#define HDMI_HPD_STATUS                        HDMI_CTRL_BASE(0x000C)
+#define HDMI_PHY_RSTOUT                        HDMI_CTRL_BASE(0x0014)
+#define HDMI_PHY_VPLL                  HDMI_CTRL_BASE(0x0018)
+#define HDMI_PHY_CMU                   HDMI_CTRL_BASE(0x001C)
+#define HDMI_CORE_RSTOUT               HDMI_CTRL_BASE(0x0020)
+
+/* Core registers */
+#define HDMI_CON_0                     HDMI_CORE_BASE(0x0000)
+#define HDMI_CON_1                     HDMI_CORE_BASE(0x0004)
+#define HDMI_CON_2                     HDMI_CORE_BASE(0x0008)
+#define HDMI_SYS_STATUS                        HDMI_CORE_BASE(0x0010)
+#define HDMI_PHY_STATUS                        HDMI_CORE_BASE(0x0014)
+#define HDMI_STATUS_EN                 HDMI_CORE_BASE(0x0020)
+#define HDMI_HPD                       HDMI_CORE_BASE(0x0030)
+#define HDMI_MODE_SEL                  HDMI_CORE_BASE(0x0040)
+#define HDMI_BLUE_SCREEN_0             HDMI_CORE_BASE(0x0050)
+#define HDMI_BLUE_SCREEN_1             HDMI_CORE_BASE(0x0054)
+#define HDMI_BLUE_SCREEN_2             HDMI_CORE_BASE(0x0058)
+#define HDMI_H_BLANK_0                 HDMI_CORE_BASE(0x00A0)
+#define HDMI_H_BLANK_1                 HDMI_CORE_BASE(0x00A4)
+#define HDMI_V_BLANK_0                 HDMI_CORE_BASE(0x00B0)
+#define HDMI_V_BLANK_1                 HDMI_CORE_BASE(0x00B4)
+#define HDMI_V_BLANK_2                 HDMI_CORE_BASE(0x00B8)
+#define HDMI_H_V_LINE_0                        HDMI_CORE_BASE(0x00C0)
+#define HDMI_H_V_LINE_1                        HDMI_CORE_BASE(0x00C4)
+#define HDMI_H_V_LINE_2                        HDMI_CORE_BASE(0x00C8)
+#define HDMI_VSYNC_POL                 HDMI_CORE_BASE(0x00E4)
+#define HDMI_INT_PRO_MODE              HDMI_CORE_BASE(0x00E8)
+#define HDMI_V_BLANK_F_0               HDMI_CORE_BASE(0x0110)
+#define HDMI_V_BLANK_F_1               HDMI_CORE_BASE(0x0114)
+#define HDMI_V_BLANK_F_2               HDMI_CORE_BASE(0x0118)
+#define HDMI_H_SYNC_GEN_0              HDMI_CORE_BASE(0x0120)
+#define HDMI_H_SYNC_GEN_1              HDMI_CORE_BASE(0x0124)
+#define HDMI_H_SYNC_GEN_2              HDMI_CORE_BASE(0x0128)
+#define HDMI_V_SYNC_GEN_1_0            HDMI_CORE_BASE(0x0130)
+#define HDMI_V_SYNC_GEN_1_1            HDMI_CORE_BASE(0x0134)
+#define HDMI_V_SYNC_GEN_1_2            HDMI_CORE_BASE(0x0138)
+#define HDMI_V_SYNC_GEN_2_0            HDMI_CORE_BASE(0x0140)
+#define HDMI_V_SYNC_GEN_2_1            HDMI_CORE_BASE(0x0144)
+#define HDMI_V_SYNC_GEN_2_2            HDMI_CORE_BASE(0x0148)
+#define HDMI_V_SYNC_GEN_3_0            HDMI_CORE_BASE(0x0150)
+#define HDMI_V_SYNC_GEN_3_1            HDMI_CORE_BASE(0x0154)
+#define HDMI_V_SYNC_GEN_3_2            HDMI_CORE_BASE(0x0158)
+#define HDMI_ACR_CON                   HDMI_CORE_BASE(0x0180)
+#define HDMI_AVI_CON                   HDMI_CORE_BASE(0x0300)
+#define HDMI_AVI_BYTE(n)               HDMI_CORE_BASE(0x0320 + 4 * (n))
+#define HDMI_DC_CONTROL                        HDMI_CORE_BASE(0x05C0)
+#define HDMI_VIDEO_PATTERN_GEN         HDMI_CORE_BASE(0x05C4)
+#define HDMI_HPD_GEN                   HDMI_CORE_BASE(0x05C8)
+#define HDMI_AUI_CON                   HDMI_CORE_BASE(0x0360)
+#define HDMI_SPD_CON                   HDMI_CORE_BASE(0x0400)
+
+/* Timing generator registers */
+#define HDMI_TG_CMD                    HDMI_TG_BASE(0x0000)
+#define HDMI_TG_H_FSZ_L                        HDMI_TG_BASE(0x0018)
+#define HDMI_TG_H_FSZ_H                        HDMI_TG_BASE(0x001C)
+#define HDMI_TG_HACT_ST_L              HDMI_TG_BASE(0x0020)
+#define HDMI_TG_HACT_ST_H              HDMI_TG_BASE(0x0024)
+#define HDMI_TG_HACT_SZ_L              HDMI_TG_BASE(0x0028)
+#define HDMI_TG_HACT_SZ_H              HDMI_TG_BASE(0x002C)
+#define HDMI_TG_V_FSZ_L                        HDMI_TG_BASE(0x0030)
+#define HDMI_TG_V_FSZ_H                        HDMI_TG_BASE(0x0034)
+#define HDMI_TG_VSYNC_L                        HDMI_TG_BASE(0x0038)
+#define HDMI_TG_VSYNC_H                        HDMI_TG_BASE(0x003C)
+#define HDMI_TG_VSYNC2_L               HDMI_TG_BASE(0x0040)
+#define HDMI_TG_VSYNC2_H               HDMI_TG_BASE(0x0044)
+#define HDMI_TG_VACT_ST_L              HDMI_TG_BASE(0x0048)
+#define HDMI_TG_VACT_ST_H              HDMI_TG_BASE(0x004C)
+#define HDMI_TG_VACT_SZ_L              HDMI_TG_BASE(0x0050)
+#define HDMI_TG_VACT_SZ_H              HDMI_TG_BASE(0x0054)
+#define HDMI_TG_FIELD_CHG_L            HDMI_TG_BASE(0x0058)
+#define HDMI_TG_FIELD_CHG_H            HDMI_TG_BASE(0x005C)
+#define HDMI_TG_VACT_ST2_L             HDMI_TG_BASE(0x0060)
+#define HDMI_TG_VACT_ST2_H             HDMI_TG_BASE(0x0064)
+#define HDMI_TG_VSYNC_TOP_HDMI_L       HDMI_TG_BASE(0x0078)
+#define HDMI_TG_VSYNC_TOP_HDMI_H       HDMI_TG_BASE(0x007C)
+#define HDMI_TG_VSYNC_BOT_HDMI_L       HDMI_TG_BASE(0x0080)
+#define HDMI_TG_VSYNC_BOT_HDMI_H       HDMI_TG_BASE(0x0084)
+#define HDMI_TG_FIELD_TOP_HDMI_L       HDMI_TG_BASE(0x0088)
+#define HDMI_TG_FIELD_TOP_HDMI_H       HDMI_TG_BASE(0x008C)
+#define HDMI_TG_FIELD_BOT_HDMI_L       HDMI_TG_BASE(0x0090)
+#define HDMI_TG_FIELD_BOT_HDMI_H       HDMI_TG_BASE(0x0094)
+
+/*
+ * Bit definition part
+ */
+
+/* HDMI_INTC_CON */
+#define HDMI_INTC_EN_GLOBAL            (1 << 6)
+#define HDMI_INTC_EN_HPD_PLUG          (1 << 3)
+#define HDMI_INTC_EN_HPD_UNPLUG                (1 << 2)
+
+/* HDMI_INTC_FLAG */
+#define HDMI_INTC_FLAG_HPD_PLUG                (1 << 3)
+#define HDMI_INTC_FLAG_HPD_UNPLUG      (1 << 2)
+
+/* HDMI_PHY_RSTOUT */
+#define HDMI_PHY_SW_RSTOUT             (1 << 0)
+
+/* HDMI_CORE_RSTOUT */
+#define HDMI_CORE_SW_RSTOUT            (1 << 0)
+
+/* HDMI_CON_0 */
+#define HDMI_BLUE_SCR_EN               (1 << 5)
+#define HDMI_EN                                (1 << 0)
+
+/* HDMI_PHY_STATUS */
+#define HDMI_PHY_STATUS_READY          (1 << 0)
+
+/* HDMI_MODE_SEL */
+#define HDMI_MODE_HDMI_EN              (1 << 1)
+#define HDMI_MODE_DVI_EN               (1 << 0)
+#define HDMI_MODE_MASK                 (3 << 0)
+
+/* HDMI_TG_CMD */
+#define HDMI_TG_EN                     (1 << 0)
+#define HDMI_FIELD_EN                  (1 << 1)
+
+#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
new file mode 100644 (file)
index 0000000..fd2f4d1
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ *
+ *  Cloned from drivers/media/video/s5p-tv/regs-mixer.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Mixer register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#ifndef SAMSUNG_REGS_MIXER_H
+#define SAMSUNG_REGS_MIXER_H
+
+/*
+ * Register part
+ */
+#define MXR_STATUS                     0x0000
+#define MXR_CFG                                0x0004
+#define MXR_INT_EN                     0x0008
+#define MXR_INT_STATUS                 0x000C
+#define MXR_LAYER_CFG                  0x0010
+#define MXR_VIDEO_CFG                  0x0014
+#define MXR_GRAPHIC0_CFG               0x0020
+#define MXR_GRAPHIC0_BASE              0x0024
+#define MXR_GRAPHIC0_SPAN              0x0028
+#define MXR_GRAPHIC0_SXY               0x002C
+#define MXR_GRAPHIC0_WH                        0x0030
+#define MXR_GRAPHIC0_DXY               0x0034
+#define MXR_GRAPHIC0_BLANK             0x0038
+#define MXR_GRAPHIC1_CFG               0x0040
+#define MXR_GRAPHIC1_BASE              0x0044
+#define MXR_GRAPHIC1_SPAN              0x0048
+#define MXR_GRAPHIC1_SXY               0x004C
+#define MXR_GRAPHIC1_WH                        0x0050
+#define MXR_GRAPHIC1_DXY               0x0054
+#define MXR_GRAPHIC1_BLANK             0x0058
+#define MXR_BG_CFG                     0x0060
+#define MXR_BG_COLOR0                  0x0064
+#define MXR_BG_COLOR1                  0x0068
+#define MXR_BG_COLOR2                  0x006C
+#define MXR_CM_COEFF_Y                 0x0080
+#define MXR_CM_COEFF_CB                        0x0084
+#define MXR_CM_COEFF_CR                        0x0088
+#define MXR_GRAPHIC0_BASE_S            0x2024
+#define MXR_GRAPHIC1_BASE_S            0x2044
+
+/* for parametrized access to layer registers */
+#define MXR_GRAPHIC_CFG(i)             (0x0020 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE(i)            (0x0024 + (i) * 0x20)
+#define MXR_GRAPHIC_SPAN(i)            (0x0028 + (i) * 0x20)
+#define MXR_GRAPHIC_SXY(i)             (0x002C + (i) * 0x20)
+#define MXR_GRAPHIC_WH(i)              (0x0030 + (i) * 0x20)
+#define MXR_GRAPHIC_DXY(i)             (0x0034 + (i) * 0x20)
+#define MXR_GRAPHIC_BLANK(i)           (0x0038 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE_S(i)          (0x2024 + (i) * 0x20)
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+#define MXR_MASK(high_bit, low_bit) \
+       (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define MXR_MASK_VAL(val, high_bit, low_bit) \
+       (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
+
+/* bits for MXR_STATUS */
+#define MXR_STATUS_16_BURST            (1 << 7)
+#define MXR_STATUS_BURST_MASK          (1 << 7)
+#define MXR_STATUS_BIG_ENDIAN          (1 << 3)
+#define MXR_STATUS_ENDIAN_MASK         (1 << 3)
+#define MXR_STATUS_SYNC_ENABLE         (1 << 2)
+#define MXR_STATUS_REG_RUN             (1 << 0)
+
+/* bits for MXR_CFG */
+#define MXR_CFG_RGB601_0_255           (0 << 9)
+#define MXR_CFG_RGB601_16_235          (1 << 9)
+#define MXR_CFG_RGB709_0_255           (2 << 9)
+#define MXR_CFG_RGB709_16_235          (3 << 9)
+#define MXR_CFG_RGB_FMT_MASK           0x600
+#define MXR_CFG_OUT_YUV444             (0 << 8)
+#define MXR_CFG_OUT_RGB888             (1 << 8)
+#define MXR_CFG_OUT_MASK               (1 << 8)
+#define MXR_CFG_DST_SDO                        (0 << 7)
+#define MXR_CFG_DST_HDMI               (1 << 7)
+#define MXR_CFG_DST_MASK               (1 << 7)
+#define MXR_CFG_SCAN_HD_720            (0 << 6)
+#define MXR_CFG_SCAN_HD_1080           (1 << 6)
+#define MXR_CFG_GRP1_ENABLE            (1 << 5)
+#define MXR_CFG_GRP0_ENABLE            (1 << 4)
+#define MXR_CFG_VP_ENABLE              (1 << 3)
+#define MXR_CFG_SCAN_INTERLACE         (0 << 2)
+#define MXR_CFG_SCAN_PROGRASSIVE       (1 << 2)
+#define MXR_CFG_SCAN_NTSC              (0 << 1)
+#define MXR_CFG_SCAN_PAL               (1 << 1)
+#define MXR_CFG_SCAN_SD                        (0 << 0)
+#define MXR_CFG_SCAN_HD                        (1 << 0)
+#define MXR_CFG_SCAN_MASK              0x47
+
+/* bits for MXR_GRAPHICn_CFG */
+#define MXR_GRP_CFG_COLOR_KEY_DISABLE  (1 << 21)
+#define MXR_GRP_CFG_BLEND_PRE_MUL      (1 << 20)
+#define MXR_GRP_CFG_WIN_BLEND_EN       (1 << 17)
+#define MXR_GRP_CFG_PIXEL_BLEND_EN     (1 << 16)
+#define MXR_GRP_CFG_FORMAT_VAL(x)      MXR_MASK_VAL(x, 11, 8)
+#define MXR_GRP_CFG_FORMAT_MASK                MXR_GRP_CFG_FORMAT_VAL(~0)
+#define MXR_GRP_CFG_ALPHA_VAL(x)       MXR_MASK_VAL(x, 7, 0)
+
+/* bits for MXR_GRAPHICn_WH */
+#define MXR_GRP_WH_H_SCALE(x)          MXR_MASK_VAL(x, 28, 28)
+#define MXR_GRP_WH_V_SCALE(x)          MXR_MASK_VAL(x, 12, 12)
+#define MXR_GRP_WH_WIDTH(x)            MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_WH_HEIGHT(x)           MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_SXY */
+#define MXR_GRP_SXY_SX(x)              MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_SXY_SY(x)              MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_DXY */
+#define MXR_GRP_DXY_DX(x)              MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_DXY_DY(x)              MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_INT_EN */
+#define MXR_INT_EN_VSYNC               (1 << 11)
+#define MXR_INT_EN_ALL                 (0x0f << 8)
+
+/* bit for MXR_INT_STATUS */
+#define MXR_INT_CLEAR_VSYNC            (1 << 11)
+#define MXR_INT_STATUS_VSYNC           (1 << 0)
+
+/* bit for MXR_LAYER_CFG */
+#define MXR_LAYER_CFG_GRP1_VAL(x)      MXR_MASK_VAL(x, 11, 8)
+#define MXR_LAYER_CFG_GRP0_VAL(x)      MXR_MASK_VAL(x, 7, 4)
+#define MXR_LAYER_CFG_VP_VAL(x)                MXR_MASK_VAL(x, 3, 0)
+
+#endif /* SAMSUNG_REGS_MIXER_H */
+
diff --git a/drivers/gpu/drm/exynos/regs-vp.h b/drivers/gpu/drm/exynos/regs-vp.h
new file mode 100644 (file)
index 0000000..10b737a
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ *
+ *  Cloned from drivers/media/video/s5p-tv/regs-vp.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * Video processor register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_VP_H
+#define SAMSUNG_REGS_VP_H
+
+/*
+ * Register part
+ */
+
+#define VP_ENABLE                      0x0000
+#define VP_SRESET                      0x0004
+#define VP_SHADOW_UPDATE               0x0008
+#define VP_FIELD_ID                    0x000C
+#define VP_MODE                                0x0010
+#define VP_IMG_SIZE_Y                  0x0014
+#define VP_IMG_SIZE_C                  0x0018
+#define VP_PER_RATE_CTRL               0x001C
+#define VP_TOP_Y_PTR                   0x0028
+#define VP_BOT_Y_PTR                   0x002C
+#define VP_TOP_C_PTR                   0x0030
+#define VP_BOT_C_PTR                   0x0034
+#define VP_ENDIAN_MODE                 0x03CC
+#define VP_SRC_H_POSITION              0x0044
+#define VP_SRC_V_POSITION              0x0048
+#define VP_SRC_WIDTH                   0x004C
+#define VP_SRC_HEIGHT                  0x0050
+#define VP_DST_H_POSITION              0x0054
+#define VP_DST_V_POSITION              0x0058
+#define VP_DST_WIDTH                   0x005C
+#define VP_DST_HEIGHT                  0x0060
+#define VP_H_RATIO                     0x0064
+#define VP_V_RATIO                     0x0068
+#define VP_POLY8_Y0_LL                 0x006C
+#define VP_POLY4_Y0_LL                 0x00EC
+#define VP_POLY4_C0_LL                 0x012C
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+
+#define VP_MASK(high_bit, low_bit) \
+       (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define VP_MASK_VAL(val, high_bit, low_bit) \
+       (((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
+
+ /* VP_ENABLE */
+#define VP_ENABLE_ON                   (1 << 0)
+
+/* VP_SRESET */
+#define VP_SRESET_PROCESSING           (1 << 0)
+
+/* VP_SHADOW_UPDATE */
+#define VP_SHADOW_UPDATE_ENABLE                (1 << 0)
+
+/* VP_MODE */
+#define VP_MODE_NV12                   (0 << 6)
+#define VP_MODE_NV21                   (1 << 6)
+#define VP_MODE_LINE_SKIP              (1 << 5)
+#define VP_MODE_MEM_LINEAR             (0 << 4)
+#define VP_MODE_MEM_TILED              (1 << 4)
+#define VP_MODE_FMT_MASK               (5 << 4)
+#define VP_MODE_FIELD_ID_AUTO_TOGGLING (1 << 2)
+#define VP_MODE_2D_IPC                 (1 << 1)
+
+/* VP_IMG_SIZE_Y */
+/* VP_IMG_SIZE_C */
+#define VP_IMG_HSIZE(x)                        VP_MASK_VAL(x, 29, 16)
+#define VP_IMG_VSIZE(x)                        VP_MASK_VAL(x, 13, 0)
+
+/* VP_SRC_H_POSITION */
+#define VP_SRC_H_POSITION_VAL(x)       VP_MASK_VAL(x, 14, 4)
+
+/* VP_ENDIAN_MODE */
+#define VP_ENDIAN_MODE_LITTLE          (1 << 0)
+
+#endif /* SAMSUNG_REGS_VP_H */
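As a quick illustration of how the VP_MASK/VP_MASK_VAL helpers above compose a register word, the following standalone sketch (not part of the patch; the 1920x1080 figures are arbitrary) packs a luma plane size into a VP_IMG_SIZE_Y-style value:

/* Standalone sketch: field packing with the VP_MASK_VAL helpers above */
#include <stdio.h>

#define VP_MASK(high_bit, low_bit) \
	(((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
#define VP_MASK_VAL(val, high_bit, low_bit) \
	(((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
#define VP_IMG_HSIZE(x)		VP_MASK_VAL(x, 29, 16)
#define VP_IMG_VSIZE(x)		VP_MASK_VAL(x, 13, 0)

int main(void)
{
	/* width lands in bits 29:16, height in bits 13:0 */
	unsigned int size = VP_IMG_HSIZE(1920) | VP_IMG_VSIZE(1080);

	printf("VP_IMG_SIZE_Y = 0x%08x\n", size);	/* 0x07800438 */
	return 0;
}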
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
new file mode 100644 (file)
index 0000000..754e14b
--- /dev/null
@@ -0,0 +1,27 @@
+config DRM_GMA500
+       tristate "Intel GMA5/600 KMS Framebuffer"
+       depends on DRM && PCI && X86 && EXPERIMENTAL
+       select FB_CFB_COPYAREA
+       select FB_CFB_FILLRECT
+       select FB_CFB_IMAGEBLIT
+       select DRM_KMS_HELPER
+       select DRM_TTM
+       help
+         Say yes for an experimental 2D KMS framebuffer driver for the
+         Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
+         devices.
+
+config DRM_GMA600
+       bool "Intel GMA600 support (Experimental)"
+       depends on DRM_GMA500
+       help
+         Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
+         platforms with LVDS ports. HDMI and MIPI are not currently
+         supported.
+
+config DRM_GMA3600
+       bool "Intel GMA3600/3650 support (Experimental)"
+       depends on DRM_GMA500
+       help
+         Say yes to include basic support for Intel GMA3600/3650 (Intel
+         Cedar Trail) platforms.
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
new file mode 100644 (file)
index 0000000..81c103b
--- /dev/null
@@ -0,0 +1,40 @@
+#
+#      KMS driver for the GMA500
+#
+ccflags-y += -Iinclude/drm
+
+gma500_gfx-y += gem_glue.o \
+         accel_2d.o \
+         backlight.o \
+         framebuffer.o \
+         gem.o \
+         gtt.o \
+         intel_bios.o \
+         intel_i2c.o \
+         intel_gmbus.o \
+         intel_opregion.o \
+         mmu.o \
+         power.o \
+         psb_drv.o \
+         psb_intel_display.o \
+         psb_intel_lvds.o \
+         psb_intel_modes.o \
+         psb_intel_sdvo.o \
+         psb_lid.o \
+         psb_irq.o \
+         psb_device.o \
+         mid_bios.o
+
+gma500_gfx-$(CONFIG_DRM_GMA3600) +=  cdv_device.o \
+         cdv_intel_crt.o \
+         cdv_intel_display.o \
+         cdv_intel_hdmi.o \
+         cdv_intel_lvds.o
+
+gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
+         oaktrail_crtc.o \
+         oaktrail_lvds.o \
+         oaktrail_hdmi.o \
+         oaktrail_hdmi_i2c.o
+
+obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
new file mode 100644 (file)
index 0000000..d5ef1a5
--- /dev/null
@@ -0,0 +1,364 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "framebuffer.h"
+
+/**
+ *     psb_spank               -       reset the 2D engine
+ *     @dev_priv: our PSB DRM device
+ *
+ *     Soft reset the graphics engine and then reload the necessary registers.
+ *     We use this at initialisation time but it will become relevant for
+ *     accelerated X later
+ */
+void psb_spank(struct drm_psb_private *dev_priv)
+{
+       PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
+               _PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
+               _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
+               _PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
+       PSB_RSGX32(PSB_CR_SOFT_RESET);
+
+       msleep(1);
+
+       PSB_WSGX32(0, PSB_CR_SOFT_RESET);
+       wmb();
+       PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
+                  PSB_CR_BIF_CTRL);
+       wmb();
+       (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+       msleep(1);
+       PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
+                  PSB_CR_BIF_CTRL);
+       (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+       PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+}
+
+/**
+ *     psb_2d_wait_available   -       wait for FIFO room
+ *     @dev_priv: our DRM device
+ *     @size: size (in dwords) of the command we want to issue
+ *
+ *     Wait until there is room to load the FIFO with our data. If the
+ *     device is not responding then reset it
+ */
+static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
+                         unsigned size)
+{
+       uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+       unsigned long t = jiffies + HZ;
+
+       while (avail < size) {
+               avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+               if (time_after(jiffies, t)) {
+                       psb_spank(dev_priv);
+                       return -EIO;
+               }
+       }
+       return 0;
+}
+
+/**
+ *     psbfb_2d_submit         -       submit a 2D command
+ *     @dev_priv: our DRM device
+ *     @cmdbuf: command to issue
+ *     @size: length (in dwords)
+ *
+ *     Issue one or more 2D commands to the accelerator. This needs to be
+ *     serialized later when we add the GEM interfaces for acceleration
+ */
+static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
+                                                               unsigned size)
+{
+       int ret = 0;
+       int i;
+       unsigned submit_size;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->lock_2d, flags);
+       while (size > 0) {
+               submit_size = (size < 0x60) ? size : 0x60;
+               size -= submit_size;
+               ret = psb_2d_wait_available(dev_priv, submit_size);
+               if (ret)
+                       break;
+
+               submit_size <<= 2;
+
+               for (i = 0; i < submit_size; i += 4)
+                       PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
+
+               (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
+       }
+       spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+       return ret;
+}
+
+
+/**
+ *     psb_accel_2d_copy_direction     -       compute blit order
+ *     @xdir: X direction of move
+ *     @ydir: Y direction of move
+ *
+ *     Compute the correct order settings to ensure that an overlapping blit
+ *     correctly copies all the pixels.
+ */
+static u32 psb_accel_2d_copy_direction(int xdir, int ydir)
+{
+       if (xdir < 0)
+               return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
+                                               PSB_2D_COPYORDER_TR2BL;
+       else
+               return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
+                                               PSB_2D_COPYORDER_TL2BR;
+}
+
+/**
+ *     psb_accel_2d_copy               -       accelerated 2D copy
+ *     @dev_priv: our DRM device
+ *     @src_offset: source offset in bytes
+ *     @src_stride: source stride in bytes
+ *     @src_format: source psb 2D format define
+ *     @dst_offset: destination offset in bytes
+ *     @dst_stride: destination stride in bytes
+ *     @dst_format: destination psb 2D format define
+ *     @src_x: source X offset in pixels
+ *     @src_y: source Y offset in pixels
+ *     @dst_x: destination X offset in pixels
+ *     @dst_y: destination Y offset in pixels
+ *     @size_x: width of the copied area in pixels
+ *     @size_y: height of the copied area in pixels
+ *
+ *     Format and issue a 2D accelerated copy command.
+ */
+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
+                            uint32_t src_offset, uint32_t src_stride,
+                            uint32_t src_format, uint32_t dst_offset,
+                            uint32_t dst_stride, uint32_t dst_format,
+                            uint16_t src_x, uint16_t src_y,
+                            uint16_t dst_x, uint16_t dst_y,
+                            uint16_t size_x, uint16_t size_y)
+{
+       uint32_t blit_cmd;
+       uint32_t buffer[10];
+       uint32_t *buf;
+       uint32_t direction;
+
+       buf = buffer;
+
+       direction =
+           psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
+
+       if (direction == PSB_2D_COPYORDER_BR2TL ||
+           direction == PSB_2D_COPYORDER_TR2BL) {
+               src_x += size_x - 1;
+               dst_x += size_x - 1;
+       }
+       if (direction == PSB_2D_COPYORDER_BR2TL ||
+           direction == PSB_2D_COPYORDER_BL2TR) {
+               src_y += size_y - 1;
+               dst_y += size_y - 1;
+       }
+
+       blit_cmd =
+           PSB_2D_BLIT_BH |
+           PSB_2D_ROT_NONE |
+           PSB_2D_DSTCK_DISABLE |
+           PSB_2D_SRCCK_DISABLE |
+           PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
+
+       *buf++ = PSB_2D_FENCE_BH;
+       *buf++ =
+           PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
+                                              PSB_2D_DST_STRIDE_SHIFT);
+       *buf++ = dst_offset;
+       *buf++ =
+           PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
+                                              PSB_2D_SRC_STRIDE_SHIFT);
+       *buf++ = src_offset;
+       *buf++ =
+           PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
+           (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
+       *buf++ = blit_cmd;
+       *buf++ =
+           (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
+                                                 PSB_2D_DST_YSTART_SHIFT);
+       *buf++ =
+           (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
+                                                 PSB_2D_DST_YSIZE_SHIFT);
+       *buf++ = PSB_2D_FLUSH_BH;
+
+       return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
+}
+
+/**
+ *     psbfb_copyarea_accel    -       copyarea acceleration for /dev/fb
+ *     @info: our framebuffer
+ *     @a: copyarea parameters from the framebuffer core
+ *
+ *     Perform a 2D copy via the accelerator
+ */
+static void psbfb_copyarea_accel(struct fb_info *info,
+                                const struct fb_copyarea *a)
+{
+       struct psb_fbdev *fbdev = info->par;
+       struct psb_framebuffer *psbfb = &fbdev->pfb;
+       struct drm_device *dev = psbfb->base.dev;
+       struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       uint32_t offset;
+       uint32_t stride;
+       uint32_t src_format;
+       uint32_t dst_format;
+
+       if (!fb)
+               return;
+
+       offset = psbfb->gtt->offset;
+       stride = fb->pitches[0];
+
+       switch (fb->depth) {
+       case 8:
+               src_format = PSB_2D_SRC_332RGB;
+               dst_format = PSB_2D_DST_332RGB;
+               break;
+       case 15:
+               src_format = PSB_2D_SRC_555RGB;
+               dst_format = PSB_2D_DST_555RGB;
+               break;
+       case 16:
+               src_format = PSB_2D_SRC_565RGB;
+               dst_format = PSB_2D_DST_565RGB;
+               break;
+       case 24:
+       case 32:
+               /* this is wrong but since we don't do blending it's okay */
+               src_format = PSB_2D_SRC_8888ARGB;
+               dst_format = PSB_2D_DST_8888ARGB;
+               break;
+       default:
+               /* software fallback */
+               cfb_copyarea(info, a);
+               return;
+       }
+
+       if (!gma_power_begin(dev, false)) {
+               cfb_copyarea(info, a);
+               return;
+       }
+       psb_accel_2d_copy(dev_priv,
+                         offset, stride, src_format,
+                         offset, stride, dst_format,
+                         a->sx, a->sy, a->dx, a->dy, a->width, a->height);
+       gma_power_end(dev);
+}
+
+/**
+ *     psbfb_copyarea  -       2D copy interface
+ *     @info: our framebuffer
+ *     @region: region to copy
+ *
+ *     Copy an area of the framebuffer console either by the accelerator
+ *     or directly using the cfb helpers according to the request
+ */
+void psbfb_copyarea(struct fb_info *info,
+                          const struct fb_copyarea *region)
+{
+       if (unlikely(info->state != FBINFO_STATE_RUNNING))
+               return;
+
+       /* Avoid the 8 pixel erratum */
+       if (region->width == 8 || region->height == 8 ||
+               (info->flags & FBINFO_HWACCEL_DISABLED))
+               return cfb_copyarea(info, region);
+
+       psbfb_copyarea_accel(info, region);
+}
+
+/**
+ *     psbfb_sync      -       synchronize 2D
+ *     @info: our framebuffer
+ *
+ *     Wait for the 2D engine to quiesce so that we can do CPU
+ *     access to the framebuffer again
+ */
+int psbfb_sync(struct fb_info *info)
+{
+       struct psb_fbdev *fbdev = info->par;
+       struct psb_framebuffer *psbfb = &fbdev->pfb;
+       struct drm_device *dev = psbfb->base.dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long _end = jiffies + DRM_HZ;
+       int busy = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->lock_2d, flags);
+       /*
+        * First idle the 2D engine.
+        */
+
+       if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+           ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
+               goto out;
+
+       do {
+               busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+               cpu_relax();
+       } while (busy && !time_after_eq(jiffies, _end));
+
+       if (busy)
+               busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+       if (busy)
+               goto out;
+
+       do {
+               busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+                                               _PSB_C2B_STATUS_BUSY) != 0);
+               cpu_relax();
+       } while (busy && !time_after_eq(jiffies, _end));
+       if (busy)
+               busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+                                       _PSB_C2B_STATUS_BUSY) != 0);
+
+out:
+       spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+       return (busy) ? -EBUSY : 0;
+}
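The copy-order logic above only matters for overlapping screen-to-screen blits: when the destination overlaps the source, the engine must scan from the far edge so source pixels are read before they are overwritten (the same reason memmove differs from memcpy). A standalone sketch of the same decision rule (illustrative only; the enum stands in for the PSB_2D_COPYORDER_* defines):

#include <stdio.h>

/* Stand-ins for the PSB_2D_COPYORDER_* hardware constants */
enum copy_order { TL2BR, TR2BL, BL2TR, BR2TL };

/* Same rule as psb_accel_2d_copy_direction(xdir, ydir) */
static enum copy_order copy_direction(int xdir, int ydir)
{
	if (xdir < 0)
		return (ydir < 0) ? BR2TL : TR2BL;
	return (ydir < 0) ? BL2TR : TL2BR;
}

int main(void)
{
	/* move a block 10 px right and 5 px down: xdir = src_x - dst_x < 0 */
	printf("%d\n", copy_direction(-10, -5));	/* prints 3 (BR2TL) */
	return 0;
}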
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
new file mode 100644 (file)
index 0000000..2079395
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * GMA500 Backlight Interface
+ *
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Eric Knopp
+ *
+ */
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "intel_bios.h"
+#include "power.h"
+
+int gma_backlight_init(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       return dev_priv->ops->backlight_init(dev);
+#else
+       return 0;
+#endif
+}
+
+void gma_backlight_exit(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       if (dev_priv->backlight_device) {
+               dev_priv->backlight_device->props.brightness = 0;
+               backlight_update_status(dev_priv->backlight_device);
+               backlight_device_unregister(dev_priv->backlight_device);
+       }
+#endif
+}
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
new file mode 100644 (file)
index 0000000..4a5b099
--- /dev/null
@@ -0,0 +1,351 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "cdv_device.h"
+
+#define VGA_SR_INDEX           0x3c4
+#define VGA_SR_DATA            0x3c5
+
+static void cdv_disable_vga(struct drm_device *dev)
+{
+       u8 sr1;
+       u32 vga_reg;
+
+       vga_reg = VGACNTRL;
+
+       outb(1, VGA_SR_INDEX);
+       sr1 = inb(VGA_SR_DATA);
+       outb(sr1 | 1<<5, VGA_SR_DATA);
+       udelay(300);
+
+       REG_WRITE(vga_reg, VGA_DISP_DISABLE);
+       REG_READ(vga_reg);
+}
+
+static int cdv_output_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       cdv_disable_vga(dev);
+
+       cdv_intel_crt_init(dev, &dev_priv->mode_dev);
+       cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
+
+       /* These bits indicate HDMI not SDVO on CDV, but we don't yet support
+          the HDMI interface */
+       if (REG_READ(SDVOB) & SDVO_DETECTED)
+               cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
+       if (REG_READ(SDVOC) & SDVO_DETECTED)
+               cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+       return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ *     Poulsbo Backlight Interfaces
+ */
+
+#define BLC_PWM_PRECISION_FACTOR 100   /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+
+#define PSB_BLC_PWM_PRECISION_FACTOR    10
+#define PSB_BLC_MAX_PWM_REG_FREQ        0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ        0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT    (16)
+
+static int cdv_brightness;
+static struct backlight_device *cdv_backlight_device;
+
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+       /* return locally cached var instead of HW read (due to DPST etc.) */
+       /* FIXME: ideally return actual value in case firmware fiddled with
+          it */
+       return cdv_brightness;
+}
+
+
+static int cdv_backlight_setup(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long core_clock;
+       /* u32 bl_max_freq; */
+       /* unsigned long value; */
+       u16 bl_max_freq;
+       uint32_t value;
+       uint32_t blc_pwm_precision_factor;
+
+       /* get bl_max_freq and pol from dev_priv*/
+       if (!dev_priv->lvds_bl) {
+               dev_err(dev->dev, "Has no valid LVDS backlight info\n");
+               return -ENOENT;
+       }
+       bl_max_freq = dev_priv->lvds_bl->freq;
+       blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+
+       core_clock = dev_priv->core_freq;
+
+       value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+       value *= blc_pwm_precision_factor;
+       value /= bl_max_freq;
+       value /= blc_pwm_precision_factor;
+
+       if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+                value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+                               return -ERANGE;
+       else {
+               /* FIXME */
+       }
+       return 0;
+}
+
+static int cdv_set_brightness(struct backlight_device *bd)
+{
+       int level = bd->props.brightness;
+
+       /* Percentage 1-100% being valid */
+       if (level < 1)
+               level = 1;
+
+       /*cdv_intel_lvds_set_brightness(dev, level); FIXME */
+       cdv_brightness = level;
+       return 0;
+}
+
+static const struct backlight_ops cdv_ops = {
+       .get_brightness = cdv_get_brightness,
+       .update_status  = cdv_set_brightness,
+};
+
+static int cdv_backlight_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret;
+       struct backlight_properties props;
+
+       memset(&props, 0, sizeof(struct backlight_properties));
+       props.max_brightness = 100;
+       props.type = BACKLIGHT_PLATFORM;
+
+       cdv_backlight_device = backlight_device_register("psb-bl",
+                                       NULL, (void *)dev, &cdv_ops, &props);
+       if (IS_ERR(cdv_backlight_device))
+               return PTR_ERR(cdv_backlight_device);
+
+       ret = cdv_backlight_setup(dev);
+       if (ret < 0) {
+               backlight_device_unregister(cdv_backlight_device);
+               cdv_backlight_device = NULL;
+               return ret;
+       }
+       cdv_backlight_device->props.brightness = 100;
+       cdv_backlight_device->props.max_brightness = 100;
+       backlight_update_status(cdv_backlight_device);
+       dev_priv->backlight_device = cdv_backlight_device;
+       return 0;
+}
+
+#endif
+
+/*
+ *     Provide the Cedarview specific chip logic and low level methods
+ *     for power management
+ *
+ *     FIXME: we need to implement the apm/ospm base management bits
+ *     for this and the MID devices.
+ */
+
+static inline u32 CDV_MSG_READ32(uint port, uint offset)
+{
+       int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+       uint32_t ret_val = 0;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       pci_write_config_dword(pci_root, 0xD0, mcr);
+       pci_read_config_dword(pci_root, 0xD4, &ret_val);
+       pci_dev_put(pci_root);
+       return ret_val;
+}
+
+static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+       int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       pci_write_config_dword(pci_root, 0xD4, value);
+       pci_write_config_dword(pci_root, 0xD0, mcr);
+       pci_dev_put(pci_root);
+}
+
+#define PSB_APM_CMD                    0x0
+#define PSB_APM_STS                    0x04
+#define PSB_PM_SSC                     0x20
+#define PSB_PM_SSS                     0x30
+#define PSB_PWRGT_GFX_MASK             0x3
+#define CDV_PWRGT_DISPLAY_CNTR         0x000fc00c
+#define CDV_PWRGT_DISPLAY_STS          0x000fc00c
+
+static void cdv_init_pm(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pwr_cnt;
+       int i;
+
+       dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+                                                       PSB_APMBA) & 0xFFFF;
+       dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+                                                       PSB_OSPMBA) & 0xFFFF;
+
+       /* Force power on for now */
+       pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+       pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+
+       outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+       for (i = 0; i < 5; i++) {
+               u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+               if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
+                       break;
+               udelay(10);
+       }
+       pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+       pwr_cnt &= ~CDV_PWRGT_DISPLAY_CNTR;
+       outl(pwr_cnt, dev_priv->ospm_base + PSB_PM_SSC);
+       for (i = 0; i < 5; i++) {
+               u32 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+               if ((pwr_sts & CDV_PWRGT_DISPLAY_STS) == 0)
+                       break;
+               udelay(10);
+       }
+}
+
+/**
+ *     cdv_save_display_registers      -       save registers lost on suspend
+ *     @dev: our DRM device
+ *
+ *     Save the state we need in order to be able to restore the interface
+ *     upon resume from suspend
+ *
+ *     FIXME: review
+ */
+static int cdv_save_display_registers(struct drm_device *dev)
+{
+       return 0;
+}
+
+/**
+ *     cdv_restore_display_registers   -       restore lost register state
+ *     @dev: our DRM device
+ *
+ *     Restore register state that was lost during suspend and resume.
+ *
+ *     FIXME: review
+ */
+static int cdv_restore_display_registers(struct drm_device *dev)
+{
+       return 0;
+}
+
+static int cdv_power_down(struct drm_device *dev)
+{
+       return 0;
+}
+
+static int cdv_power_up(struct drm_device *dev)
+{
+       return 0;
+}
+
+/* FIXME ? - shared with Poulsbo */
+static void cdv_get_core_freq(struct drm_device *dev)
+{
+       uint32_t clock;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+       pci_read_config_dword(pci_root, 0xD4, &clock);
+       pci_dev_put(pci_root);
+
+       switch (clock & 0x07) {
+       case 0:
+               dev_priv->core_freq = 100;
+               break;
+       case 1:
+               dev_priv->core_freq = 133;
+               break;
+       case 2:
+               dev_priv->core_freq = 150;
+               break;
+       case 3:
+               dev_priv->core_freq = 178;
+               break;
+       case 4:
+               dev_priv->core_freq = 200;
+               break;
+       case 5:
+       case 6:
+       case 7:
+               dev_priv->core_freq = 266;
+               break;
+       default:
+               dev_priv->core_freq = 0;
+       }
+}
+
+static int cdv_chip_setup(struct drm_device *dev)
+{
+       cdv_get_core_freq(dev);
+       gma_intel_opregion_init(dev);
+       psb_intel_init_bios(dev);
+       return 0;
+}
+
+/* CDV is much like Poulsbo but has MID like SGX offsets and PM */
+
+const struct psb_ops cdv_chip_ops = {
+       .name = "GMA3600/3650",
+       .accel_2d = 0,
+       .pipes = 2,
+       .crtcs = 2,
+       .sgx_offset = MRST_SGX_OFFSET,
+       .chip_setup = cdv_chip_setup,
+
+       .crtc_helper = &cdv_intel_helper_funcs,
+       .crtc_funcs = &cdv_intel_crtc_funcs,
+
+       .output_init = cdv_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       .backlight_init = cdv_backlight_init,
+#endif
+
+       .init_pm = cdv_init_pm,
+       .save_regs = cdv_save_display_registers,
+       .restore_regs = cdv_restore_display_registers,
+       .power_down = cdv_power_down,
+       .power_up = cdv_power_up,
+};
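cdv_chip_ops above is one instance of the driver's per-chip abstraction: generic code never calls Cedarview functions directly, it dispatches through the psb_ops pointer kept in the device private data (as backlight.c does with dev_priv->ops->backlight_init(dev)). A reduced sketch of that vtable pattern, with hypothetical names, is:

#include <stdio.h>

struct chip;				/* stand-in for drm_device */

struct chip_ops {			/* reduced psb_ops */
	const char *name;
	int (*power_up)(struct chip *c);
	int (*power_down)(struct chip *c);
};

struct chip {
	const struct chip_ops *ops;	/* like dev_priv->ops */
};

static int cdv_like_power_up(struct chip *c)
{
	printf("%s: power up\n", c->ops->name);
	return 0;
}

static int cdv_like_power_down(struct chip *c)
{
	printf("%s: power down\n", c->ops->name);
	return 0;
}

static const struct chip_ops cdv_like_ops = {
	.name		= "GMA3600/3650",
	.power_up	= cdv_like_power_up,
	.power_down	= cdv_like_power_down,
};

int main(void)
{
	struct chip c = { .ops = &cdv_like_ops };

	c.ops->power_up(&c);		/* generic caller, chip-specific behaviour */
	return c.ops->power_down(&c);
}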
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
new file mode 100644 (file)
index 0000000..2a88b7b
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright Â© 2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
+extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
+extern void cdv_intel_crt_init(struct drm_device *dev,
+                       struct psb_intel_mode_device *mode_dev);
+extern void cdv_intel_lvds_init(struct drm_device *dev,
+                       struct psb_intel_mode_device *mode_dev);
+extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev,
+                       int reg);
+extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+                                            struct drm_crtc *crtc);
+
+extern inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
+{
+       /* Wait for 20ms, i.e. one cycle at 50Hz. */
+       /* FIXME: msleep ?? */
+       mdelay(20);
+}
+
+
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
new file mode 100644 (file)
index 0000000..6d0f10b
--- /dev/null
@@ -0,0 +1,333 @@
+/*
+ * Copyright Â© 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+
+static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       u32 temp, reg;
+       reg = ADPA;
+
+       temp = REG_READ(reg);
+       temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+       temp &= ~ADPA_DAC_ENABLE;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               temp |= ADPA_DAC_ENABLE;
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+               temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+               break;
+       case DRM_MODE_DPMS_SUSPEND:
+               temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+               break;
+       case DRM_MODE_DPMS_OFF:
+               temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+               break;
+       }
+
+       REG_WRITE(reg, temp);
+}
+
+static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
+                               struct drm_display_mode *mode)
+{
+       int max_clock = 0;
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       /* The lowest clock for CDV is 20000 kHz */
+       if (mode->clock < 20000)
+               return MODE_CLOCK_LOW;
+
+       /* The max clock for CDV is 355 MHz instead of 400 MHz */
+       max_clock = 355000;
+       if (mode->clock > max_clock)
+               return MODE_CLOCK_HIGH;
+
+       if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
+               return MODE_PANEL;
+
+       return MODE_OK;
+}
+
+static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
+                                struct drm_display_mode *mode,
+                                struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode)
+{
+
+       struct drm_device *dev = encoder->dev;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct psb_intel_crtc *psb_intel_crtc =
+                                       to_psb_intel_crtc(crtc);
+       int dpll_md_reg;
+       u32 adpa, dpll_md;
+       u32 adpa_reg;
+
+       if (psb_intel_crtc->pipe == 0)
+               dpll_md_reg = DPLL_A_MD;
+       else
+               dpll_md_reg = DPLL_B_MD;
+
+       adpa_reg = ADPA;
+
+       /*
+        * Disable separate mode multiplier used when cloning SDVO to CRT
+        * XXX this needs to be adjusted when we really are cloning
+        */
+       {
+               dpll_md = REG_READ(dpll_md_reg);
+               REG_WRITE(dpll_md_reg,
+                          dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+       }
+
+       adpa = 0;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+               adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+               adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+       if (psb_intel_crtc->pipe == 0)
+               adpa |= ADPA_PIPE_A_SELECT;
+       else
+               adpa |= ADPA_PIPE_B_SELECT;
+
+       REG_WRITE(adpa_reg, adpa);
+}
+
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Returns true if the CRT is connected, false if it is disconnected.
+ */
+static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
+                                                               bool force)
+{
+       struct drm_device *dev = connector->dev;
+       u32 hotplug_en;
+       int i, tries = 0, ret = false;
+       u32 adpa_orig;
+
+       /* disable the DAC when doing the hotplug detection */
+
+       adpa_orig = REG_READ(ADPA);
+
+       REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
+
+       /*
+        * On a CDV, the CRT detect sequence needs to be done twice
+        * to get a reliable result.
+        */
+       tries = 2;
+
+       hotplug_en = REG_READ(PORT_HOTPLUG_EN);
+       hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
+       hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+       hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+       hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+       for (i = 0; i < tries ; i++) {
+               unsigned long timeout;
+               /* turn on the FORCE_DETECT */
+               REG_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+               timeout = jiffies + msecs_to_jiffies(1000);
+               /* wait for FORCE_DETECT to go off */
+               do {
+                       if (!(REG_READ(PORT_HOTPLUG_EN) &
+                                       CRT_HOTPLUG_FORCE_DETECT))
+                               break;
+                       msleep(1);
+               } while (time_after(timeout, jiffies));
+       }
+
+       if ((REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
+           CRT_HOTPLUG_MONITOR_NONE)
+               ret = true;
+
+       /* Restore the saved ADPA */
+       REG_WRITE(ADPA, adpa_orig);
+       return ret;
+}
+
+static enum drm_connector_status cdv_intel_crt_detect(
+                               struct drm_connector *connector, bool force)
+{
+       if (cdv_intel_crt_detect_hotplug(connector, force))
+               return connector_status_connected;
+       else
+               return connector_status_disconnected;
+}
+
+static void cdv_intel_crt_destroy(struct drm_connector *connector)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+
+       psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+static int cdv_intel_crt_get_modes(struct drm_connector *connector)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                               psb_intel_attached_encoder(connector);
+       return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter);
+}
+
+static int cdv_intel_crt_set_property(struct drm_connector *connector,
+                                 struct drm_property *property,
+                                 uint64_t value)
+{
+       return 0;
+}
+
+/*
+ * Routines for controlling stuff on the analog port
+ */
+
+static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
+       .dpms = cdv_intel_crt_dpms,
+       .mode_fixup = cdv_intel_crt_mode_fixup,
+       .prepare = psb_intel_encoder_prepare,
+       .commit = psb_intel_encoder_commit,
+       .mode_set = cdv_intel_crt_mode_set,
+};
+
+static const struct drm_connector_funcs cdv_intel_crt_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = cdv_intel_crt_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = cdv_intel_crt_destroy,
+       .set_property = cdv_intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs
+                               cdv_intel_crt_connector_helper_funcs = {
+       .mode_valid = cdv_intel_crt_mode_valid,
+       .get_modes = cdv_intel_crt_get_modes,
+       .best_encoder = psb_intel_best_encoder,
+};
+
+static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
+       .destroy = cdv_intel_crt_enc_destroy,
+};
+
+void cdv_intel_crt_init(struct drm_device *dev,
+                       struct psb_intel_mode_device *mode_dev)
+{
+
+       struct psb_intel_connector *psb_intel_connector;
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+
+       u32 i2c_reg;
+
+       psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+       if (!psb_intel_encoder)
+               return;
+
+       psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+       if (!psb_intel_connector)
+               goto failed_connector;
+
+       connector = &psb_intel_connector->base;
+       drm_connector_init(dev, connector,
+               &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+       encoder = &psb_intel_encoder->base;
+       drm_encoder_init(dev, encoder,
+               &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
+
+       psb_intel_connector_attach_encoder(psb_intel_connector,
+                                          psb_intel_encoder);
+
+       /* Set up the DDC bus. */
+       i2c_reg = GPIOA;
+       /* Remove the following code for CDV */
+       /*
+       if (dev_priv->crt_ddc_bus != 0)
+               i2c_reg = dev_priv->crt_ddc_bus;
+       }*/
+       psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+                                                         i2c_reg, "CRTDDC_A");
+       if (!psb_intel_encoder->ddc_bus) {
+               dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+                          "failed.\n");
+               goto failed_ddc;
+       }
+
+       psb_intel_encoder->type = INTEL_OUTPUT_ANALOG;
+       /*
+       psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
+       psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
+       */
+       connector->interlace_allowed = 0;
+       connector->doublescan_allowed = 0;
+
+       drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
+       drm_connector_helper_add(connector,
+                                       &cdv_intel_crt_connector_helper_funcs);
+
+       drm_sysfs_connector_add(connector);
+
+       return;
+failed_ddc:
+       drm_encoder_cleanup(&psb_intel_encoder->base);
+       drm_connector_cleanup(&psb_intel_connector->base);
+       kfree(psb_intel_connector);
+failed_connector:
+       kfree(psb_intel_encoder);
+       return;
+}
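The failed_ddc/failed_connector labels above follow the usual kernel error-unwind style: each label undoes only what was successfully set up before the failure, in reverse order. A compressed standalone sketch of the same shape (hypothetical resources stand in for the encoder/connector/DDC trio):

#include <stdlib.h>

struct crt_like {
	void *encoder, *connector, *ddc;	/* hypothetical stand-ins */
};

static int setup_crt_like(struct crt_like *c)
{
	c->encoder = calloc(1, 16);
	if (!c->encoder)
		return -1;

	c->connector = calloc(1, 16);
	if (!c->connector)
		goto failed_connector;

	c->ddc = calloc(1, 16);
	if (!c->ddc)
		goto failed_ddc;

	return 0;			/* success: caller owns all three */

failed_ddc:
	free(c->connector);		/* undo in reverse order of setup */
failed_connector:
	free(c->encoder);
	return -1;
}

int main(void)
{
	struct crt_like c = { 0 };

	return setup_crt_like(&c) ? 1 : 0;
}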
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
new file mode 100644 (file)
index 0000000..18d1152
--- /dev/null
@@ -0,0 +1,1508 @@
+/*
+ * Copyright Â© 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+#include "cdv_device.h"
+
+
+struct cdv_intel_range_t {
+       int min, max;
+};
+
+struct cdv_intel_p2_t {
+       int dot_limit;
+       int p2_slow, p2_fast;
+};
+
+struct cdv_intel_clock_t {
+       /* given values */
+       int n;
+       int m1, m2;
+       int p1, p2;
+       /* derived values */
+       int dot;
+       int vco;
+       int m;
+       int p;
+};
+
+#define INTEL_P2_NUM                 2
+
+struct cdv_intel_limit_t {
+       struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+       struct cdv_intel_p2_t p2;
+};
+
+#define CDV_LIMIT_SINGLE_LVDS_96       0
+#define CDV_LIMIT_SINGLE_LVDS_100      1
+#define CDV_LIMIT_DAC_HDMI_27          2
+#define CDV_LIMIT_DAC_HDMI_96          3
+
+static const struct cdv_intel_limit_t cdv_intel_limits[] = {
+       {                       /* CDV_SINGLE_LVDS_96MHz */
+        .dot = {.min = 20000, .max = 115500},
+        .vco = {.min = 1800000, .max = 3600000},
+        .n = {.min = 2, .max = 6},
+        .m = {.min = 60, .max = 160},
+        .m1 = {.min = 0, .max = 0},
+        .m2 = {.min = 58, .max = 158},
+        .p = {.min = 28, .max = 140},
+        .p1 = {.min = 2, .max = 10},
+        .p2 = {.dot_limit = 200000,
+               .p2_slow = 14, .p2_fast = 14},
+        },
+       {                       /* CDV_SINGLE_LVDS_100MHz */
+        .dot = {.min = 20000, .max = 115500},
+        .vco = {.min = 1800000, .max = 3600000},
+        .n = {.min = 2, .max = 6},
+        .m = {.min = 60, .max = 160},
+        .m1 = {.min = 0, .max = 0},
+        .m2 = {.min = 58, .max = 158},
+        .p = {.min = 28, .max = 140},
+        .p1 = {.min = 2, .max = 10},
+        /* The single-channel range is 25-112 MHz, and dual-channel
+         * is 80-224 MHz.  Prefer single channel as much as possible.
+         */
+        .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+        },
+       {                       /* CDV_DAC_HDMI_27MHz */
+        .dot = {.min = 20000, .max = 400000},
+        .vco = {.min = 1809000, .max = 3564000},
+        .n = {.min = 1, .max = 1},
+        .m = {.min = 67, .max = 132},
+        .m1 = {.min = 0, .max = 0},
+        .m2 = {.min = 65, .max = 130},
+        .p = {.min = 5, .max = 90},
+        .p1 = {.min = 1, .max = 9},
+        .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+        },
+       {                       /* CDV_DAC_HDMI_96MHz */
+        .dot = {.min = 20000, .max = 400000},
+        .vco = {.min = 1800000, .max = 3600000},
+        .n = {.min = 2, .max = 6},
+        .m = {.min = 60, .max = 160},
+        .m1 = {.min = 0, .max = 0},
+        .m2 = {.min = 58, .max = 158},
+        .p = {.min = 5, .max = 100},
+        .p1 = {.min = 1, .max = 10},
+        .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+        },
+};
+
+#define _wait_for(COND, MS, W) ({ \
+       unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
+       int ret__ = 0;                                                  \
+       while (!(COND)) {                                               \
+               if (time_after(jiffies, timeout__)) {                   \
+                       ret__ = -ETIMEDOUT;                             \
+                       break;                                          \
+               }                                                       \
+               if (W && !in_dbg_master())                              \
+                       msleep(W);                                      \
+       }                                                               \
+       ret__;                                                          \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+
+
+static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+{
+       int ret;
+
+       ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+       if (ret) {
+               DRM_ERROR("timeout waiting for SB to idle before read\n");
+               return ret;
+       }
+
+       REG_WRITE(SB_ADDR, reg);
+       REG_WRITE(SB_PCKT,
+                  SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
+                  SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+                  SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+       ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+       if (ret) {
+               DRM_ERROR("timeout waiting for SB to idle after read\n");
+               return ret;
+       }
+
+       *val = REG_READ(SB_DATA);
+
+       return 0;
+}
+
+static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+{
+       int ret;
+       static bool dpio_debug = true;
+       u32 temp;
+
+       if (dpio_debug) {
+               if (cdv_sb_read(dev, reg, &temp) == 0)
+                       DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
+               DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
+       }
+
+       ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+       if (ret) {
+               DRM_ERROR("timeout waiting for SB to idle before write\n");
+               return ret;
+       }
+
+       REG_WRITE(SB_ADDR, reg);
+       REG_WRITE(SB_DATA, val);
+       REG_WRITE(SB_PCKT,
+                  SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
+                  SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+                  SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+       ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+       if (ret) {
+               DRM_ERROR("timeout waiting for SB to idle after write\n");
+               return ret;
+       }
+
+       if (dpio_debug) {
+               if (cdv_sb_read(dev, reg, &temp) == 0)
+                       DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
+       }
+
+       return 0;
+}
+
+/* Reset the DPIO configuration register.  The BIOS does this at every
+ * mode set.
+ */
+static void cdv_sb_reset(struct drm_device *dev)
+{
+
+       REG_WRITE(DPIO_CFG, 0);
+       REG_READ(DPIO_CFG);
+       REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
+}
+
+/* Unlike most Intel display engines, on Cedarview the DPLL registers
+ * are behind this sideband bus.  They must be programmed while the
+ * DPLL reference clock is on in the DPLL control register, but before
+ * the DPLL is enabled in the DPLL control register.
+ */
+static int
+cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
+                              struct cdv_intel_clock_t *clock)
+{
+       struct psb_intel_crtc *psb_crtc =
+                               to_psb_intel_crtc(crtc);
+       int pipe = psb_crtc->pipe;
+       u32 m, n_vco, p;
+       int ret = 0;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       u32 ref_value;
+
+       cdv_sb_reset(dev);
+
+       if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
+               DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
+               return -EBUSY;
+       }
+
+       /* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
+       ref_value = 0x68A701;
+
+       cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);
+
+       /* We don't know what the other fields of these regs are, so
+        * leave them in place.
+        */
+       ret = cdv_sb_read(dev, SB_M(pipe), &m);
+       if (ret)
+               return ret;
+       m &= ~SB_M_DIVIDER_MASK;
+       m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
+       ret = cdv_sb_write(dev, SB_M(pipe), m);
+       if (ret)
+               return ret;
+
+       ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
+       if (ret)
+               return ret;
+
+       /* Follow the BIOS to program the N_DIVIDER REG */
+       n_vco &= 0xFFFF;
+       n_vco |= 0x107;
+       n_vco &= ~(SB_N_VCO_SEL_MASK |
+                  SB_N_DIVIDER_MASK |
+                  SB_N_CB_TUNE_MASK);
+
+       n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);
+
+       if (clock->vco < 2250000) {
+               n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
+               n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
+       } else if (clock->vco < 2750000) {
+               n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
+               n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
+       } else if (clock->vco < 3300000) {
+               n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+               n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
+       } else {
+               n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+               n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
+       }
+
+       ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
+       if (ret)
+               return ret;
+
+       ret = cdv_sb_read(dev, SB_P(pipe), &p);
+       if (ret)
+               return ret;
+       p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
+       p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
+       switch (clock->p2) {
+       case 5:
+               p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
+               break;
+       case 10:
+               p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
+               break;
+       case 14:
+               p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
+               break;
+       case 7:
+               p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
+               break;
+       default:
+               DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
+               return -EINVAL;
+       }
+       ret = cdv_sb_write(dev, SB_P(pipe), p);
+       if (ret)
+               return ret;
+
+       /* Always program the lane registers for pipe A */
+       if (pipe == 0) {
+               /* Program the Lane0/1 for HDMI B */
+               u32 lane_reg, lane_value;
+
+               lane_reg = PSB_LANE0;
+               cdv_sb_read(dev, lane_reg, &lane_value);
+               lane_value &= ~(LANE_PLL_MASK);
+               lane_value |= LANE_PLL_ENABLE;
+               cdv_sb_write(dev, lane_reg, lane_value);
+
+               lane_reg = PSB_LANE1;
+               cdv_sb_read(dev, lane_reg, &lane_value);
+               lane_value &= ~(LANE_PLL_MASK);
+               lane_value |= LANE_PLL_ENABLE;
+               cdv_sb_write(dev, lane_reg, lane_value);
+
+               /* Program the Lane2/3 for HDMI C */
+               lane_reg = PSB_LANE2;
+               cdv_sb_read(dev, lane_reg, &lane_value);
+               lane_value &= ~(LANE_PLL_MASK);
+               lane_value |= LANE_PLL_ENABLE;
+               cdv_sb_write(dev, lane_reg, lane_value);
+
+               lane_reg = PSB_LANE3;
+               cdv_sb_read(dev, lane_reg, &lane_value);
+               lane_value &= ~(LANE_PLL_MASK);
+               lane_value |= LANE_PLL_ENABLE;
+               cdv_sb_write(dev, lane_reg, lane_value);
+       }
+
+       return 0;
+}
+
+/*
+ * Returns whether any encoder on the specified pipe is of the specified type
+ */
+bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *l_entry;
+
+       list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+               if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+                       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(l_entry);
+                       if (psb_intel_encoder->type == type)
+                               return true;
+               }
+       }
+       return false;
+}
+
+static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
+                                                       int refclk)
+{
+       const struct cdv_intel_limit_t *limit;
+       if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+               /*
+                * Now only single-channel LVDS is supported on CDV. If it is
+                * incorrect, please add the dual-channel LVDS.
+                */
+               if (refclk == 96000)
+                       limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
+               else
+                       limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+       } else {
+               if (refclk == 27000)
+                       limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
+               else
+                       limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
+       }
+       return limit;
+}
+
+/* m1 is reserved as 0 in CDV, n is a ring counter */
+static void cdv_intel_clock(struct drm_device *dev,
+                       int refclk, struct cdv_intel_clock_t *clock)
+{
+       clock->m = clock->m2 + 2;
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = (refclk * clock->m) / clock->n;
+       clock->dot = clock->vco / clock->p;
+}
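/*
 * Worked example (illustrative, not part of the patch): with a 96000 kHz
 * reference clock and divisors n = 3, m2 = 100, p1 = 4, p2 = 14 (all within
 * the CDV_LIMIT_SINGLE_LVDS_96 ranges above), cdv_intel_clock() yields
 * m = m2 + 2 = 102, p = p1 * p2 = 56, vco = 96000 * 102 / 3 = 3264000 kHz
 * and dot = 3264000 / 56 = 58285 kHz, i.e. roughly a 58.3 MHz dot clock,
 * which cdv_intel_PLL_is_valid() below would accept.
 */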
+
+
+#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
+                               const struct cdv_intel_limit_t *limit,
+                              struct cdv_intel_clock_t *clock)
+{
+       if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+               INTELPllInvalid("p1 out of range\n");
+       if (clock->p < limit->p.min || limit->p.max < clock->p)
+               INTELPllInvalid("p out of range\n");
+       /* unnecessary to check the range of m(m1/M2)/n again */
+       if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+               INTELPllInvalid("vco out of range\n");
+       /* XXX: We may need to be checking "Dot clock"
+        * depending on the multiplier, connector, etc.,
+        * rather than just a single range.
+        */
+       if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+               INTELPllInvalid("dot out of range\n");
+
+       return true;
+}
+
+static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
+                               int refclk,
+                               struct cdv_intel_clock_t *best_clock)
+{
+       struct drm_device *dev = crtc->dev;
+       struct cdv_intel_clock_t clock;
+       const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
+       int err = target;
+
+
+       if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+           (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+               /*
+                * For LVDS, if the panel is on, just rely on its current
+                * settings for dual-channel.  We haven't figured out how to
+                * reliably set up different single/dual channel state, if we
+                * even can.
+                */
+               if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+                   LVDS_CLKB_POWER_UP)
+                       clock.p2 = limit->p2.p2_fast;
+               else
+                       clock.p2 = limit->p2.p2_slow;
+       } else {
+               if (target < limit->p2.dot_limit)
+                       clock.p2 = limit->p2.p2_slow;
+               else
+                       clock.p2 = limit->p2.p2_fast;
+       }
+
+       memset(best_clock, 0, sizeof(*best_clock));
+       clock.m1 = 0;
+       /* m1 is reserved as 0 in CDV, n is a ring counter.
+          So skip the m1 loop */
+       for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
+               for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
+                                            clock.m2++) {
+                       for (clock.p1 = limit->p1.min;
+                                       clock.p1 <= limit->p1.max;
+                                       clock.p1++) {
+                               int this_err;
+
+                               cdv_intel_clock(dev, refclk, &clock);
+
+                               if (!cdv_intel_PLL_is_valid(crtc,
+                                                               limit, &clock))
+                                               continue;
+
+                               this_err = abs(clock.dot - target);
+                               if (this_err < err) {
+                                       *best_clock = clock;
+                                       err = this_err;
+                               }
+                       }
+               }
+       }
+
+       return err != target;
+}
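
The search above is a plain exhaustive scan: every (n, m2, p1) combination allowed by the limit table is evaluated and the set whose resulting dot clock is closest to the target wins. A reduced user-space sketch of the same idea, with hypothetical divisor ranges and a fixed p2:

    /* Sketch of the brute-force divisor search: try every (n, m2, p1)
     * in some hypothetical ranges and keep the closest dot clock. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            int refclk = 96000, p2 = 10, target = 108000;   /* kHz */
            int best_err = target, best_dot = 0;
            int n, m2, p1;

            for (n = 1; n <= 6; n++)
                    for (m2 = 40; m2 <= 100; m2++)
                            for (p1 = 1; p1 <= 10; p1++) {
                                    int dot = (refclk * (m2 + 2) / n) /
                                              (p1 * p2);
                                    int err = abs(dot - target);

                                    if (err < best_err) {
                                            best_err = err;
                                            best_dot = dot;
                                    }
                            }
            printf("closest dot clock: %d kHz (error %d kHz)\n",
                   best_dot, best_err);
            return 0;
    }
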
+
+int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
+                           int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+       int pipe = psb_intel_crtc->pipe;
+       unsigned long start, offset;
+       int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+       int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+       int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       u32 dspcntr;
+       int ret = 0;
+
+       if (!gma_power_begin(dev, true))
+               return 0;
+
+       /* no fb bound */
+       if (!crtc->fb) {
+               dev_err(dev->dev, "No FB bound\n");
+               goto psb_intel_pipe_cleaner;
+       }
+
+
+       /* We are displaying this buffer, make sure it is actually loaded
+          into the GTT */
+       ret = psb_gtt_pin(psbfb->gtt);
+       if (ret < 0)
+               goto psb_intel_pipe_set_base_exit;
+       start = psbfb->gtt->offset;
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
+
+       dspcntr = REG_READ(dspcntr_reg);
+       dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+       switch (crtc->fb->bits_per_pixel) {
+       case 8:
+               dspcntr |= DISPPLANE_8BPP;
+               break;
+       case 16:
+               if (crtc->fb->depth == 15)
+                       dspcntr |= DISPPLANE_15_16BPP;
+               else
+                       dspcntr |= DISPPLANE_16BPP;
+               break;
+       case 24:
+       case 32:
+               dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+               break;
+       default:
+               dev_err(dev->dev, "Unknown color depth\n");
+               ret = -EINVAL;
+               goto psb_intel_pipe_set_base_exit;
+       }
+       REG_WRITE(dspcntr_reg, dspcntr);
+
+       dev_dbg(dev->dev,
+               "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
+
+       REG_WRITE(dspbase, offset);
+       REG_READ(dspbase);
+       REG_WRITE(dspsurf, start);
+       REG_READ(dspsurf);
+
+psb_intel_pipe_cleaner:
+       /* If there was a previous display we can now unpin it */
+       if (old_fb)
+               psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+psb_intel_pipe_set_base_exit:
+       gma_power_end(dev);
+       return ret;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       u32 temp;
+       bool enabled;
+
+       /* XXX: When our outputs are all unaware of DPMS modes other than off
+        * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+        */
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+               /* Enable the DPLL */
+               temp = REG_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) == 0) {
+                       REG_WRITE(dpll_reg, temp);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+               }
+
+               /* Jim Bish - switch plane and pipe per Scott */
+               /* Enable the plane */
+               temp = REG_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+                       REG_WRITE(dspcntr_reg,
+                                 temp | DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+               }
+
+               udelay(150);
+
+               /* Enable the pipe */
+               temp = REG_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) == 0)
+                       REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+               psb_intel_crtc_load_lut(crtc);
+
+               /* Give the overlay scaler a chance to enable
+                * if it's on this pipe */
+               /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+               break;
+       case DRM_MODE_DPMS_OFF:
+               /* Give the overlay scaler a chance to disable
+                * if it's on this pipe */
+               /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+               /* Disable the VGA plane that we never use */
+               REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+               /* Jim Bish - changed pipe/plane here as well. */
+
+               /* Wait for vblank for the disable to take effect */
+               cdv_intel_wait_for_vblank(dev);
+
+               /* Next, disable display pipes */
+               temp = REG_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+                       REG_READ(pipeconf_reg);
+               }
+
+               /* Wait for vblank for the disable to take effect. */
+               cdv_intel_wait_for_vblank(dev);
+
+               udelay(150);
+
+               /* Disable display plane */
+               temp = REG_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+                       REG_WRITE(dspcntr_reg,
+                                 temp & ~DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+                       REG_READ(dspbase_reg);
+               }
+
+               temp = REG_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) != 0) {
+                       REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+               }
+
+               /* Wait for the clocks to turn off. */
+               udelay(150);
+               break;
+       }
+       enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+       /* Set FIFO watermarks */
+       REG_WRITE(DSPARB, 0x3F3E);
+}
+
+static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void cdv_intel_encoder_prepare(struct drm_encoder *encoder)
+{
+       struct drm_encoder_helper_funcs *encoder_funcs =
+           encoder->helper_private;
+       /* lvds has its own version of prepare see cdv_intel_lvds_prepare */
+       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void cdv_intel_encoder_commit(struct drm_encoder *encoder)
+{
+       struct drm_encoder_helper_funcs *encoder_funcs =
+           encoder->helper_private;
+       /* lvds has its own version of commit see cdv_intel_lvds_commit */
+       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+       u32 pfit_control;
+
+       pfit_control = REG_READ(PFIT_CONTROL);
+
+       /* See if the panel fitter is in use */
+       if ((pfit_control & PFIT_ENABLE) == 0)
+               return -1;
+       return (pfit_control >> 29) & 0x3;
+}
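
The pipe selection lives in a two-bit field of PFIT_CONTROL, which the helper above extracts by shifting down 29 and masking with 0x3. A tiny illustration (the register value below is made up):

    /* Illustration of extracting the panel-fitter pipe field,
     * i.e. bits 30:29 of PFIT_CONTROL (value below is made up). */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t pfit_control = 1u << 29;          /* pipe field = 1 */
            int pipe = (pfit_control >> 29) & 0x3;

            printf("panel fitter pipe = %d\n", pipe);  /* prints 1 */
            return 0;
    }
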
+
+static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode,
+                              int x, int y,
+                              struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+       int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+       int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int refclk;
+       struct cdv_intel_clock_t clock;
+       u32 dpll = 0, dspcntr, pipeconf;
+       bool ok, is_sdvo = false, is_dvo = false;
+       bool is_crt = false, is_lvds = false, is_tv = false;
+       bool is_hdmi = false;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *connector;
+
+       list_for_each_entry(connector, &mode_config->connector_list, head) {
+               struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+
+               if (!connector->encoder
+                   || connector->encoder->crtc != crtc)
+                       continue;
+
+               switch (psb_intel_encoder->type) {
+               case INTEL_OUTPUT_LVDS:
+                       is_lvds = true;
+                       break;
+               case INTEL_OUTPUT_SDVO:
+                       is_sdvo = true;
+                       break;
+               case INTEL_OUTPUT_DVO:
+                       is_dvo = true;
+                       break;
+               case INTEL_OUTPUT_TVOUT:
+                       is_tv = true;
+                       break;
+               case INTEL_OUTPUT_ANALOG:
+                       is_crt = true;
+                       break;
+               case INTEL_OUTPUT_HDMI:
+                       is_hdmi = true;
+                       break;
+               }
+       }
+
+       refclk = 96000;
+
+       /* Hack: reference clock selection for CRT */
+       /* Select 27MHz as the reference clock for HDMI */
+       if (is_crt || is_hdmi)
+               refclk = 27000;
+
+       drm_mode_debug_printmodeline(adjusted_mode);
+
+       ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+                                &clock);
+       if (!ok) {
+               dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+               return 0;
+       }
+
+       dpll = DPLL_VGA_MODE_DIS;
+       if (is_tv) {
+               /* XXX: just matching BIOS for now */
+/*     dpll |= PLL_REF_INPUT_TVCLKINBC; */
+               dpll |= 3;
+       }
+       dpll |= PLL_REF_INPUT_DREFCLK;
+
+       dpll |= DPLL_SYNCLOCK_ENABLE;
+       dpll |= DPLL_VGA_MODE_DIS;
+       if (is_lvds)
+               dpll |= DPLLB_MODE_LVDS;
+       else
+               dpll |= DPLLB_MODE_DAC_SERIAL;
+       /* dpll |= (2 << 11); */
+
+       /* setup pipeconf */
+       pipeconf = REG_READ(pipeconf_reg);
+
+       /* Set up the display plane register */
+       dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+       if (pipe == 0)
+               dspcntr |= DISPPLANE_SEL_PIPE_A;
+       else
+               dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+       dspcntr |= DISPLAY_PLANE_ENABLE;
+       pipeconf |= PIPEACONF_ENABLE;
+
+       REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+       REG_READ(dpll_reg);
+
+       cdv_dpll_set_clock_cdv(dev, crtc, &clock);
+
+       udelay(150);
+
+
+       /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+        * This is an exception to the general rule that mode_set doesn't turn
+        * things on.
+        */
+       if (is_lvds) {
+               u32 lvds = REG_READ(LVDS);
+
+               lvds |=
+                   LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
+                   LVDS_PIPEB_SELECT;
+               /* Set the B0-B3 data pairs corresponding to
+                * whether we're going to
+                * set the DPLLs for dual-channel mode or not.
+                */
+               if (clock.p2 == 7)
+                       lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+               else
+                       lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+               /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+                * appropriately here, but we need to look more
+                * thoroughly into how panels behave in the two modes.
+                */
+
+               REG_WRITE(LVDS, lvds);
+               REG_READ(LVDS);
+       }
+
+       dpll |= DPLL_VCO_ENABLE;
+
+       /* Disable the panel fitter if it was on our pipe */
+       if (cdv_intel_panel_fitter_pipe(dev) == pipe)
+               REG_WRITE(PFIT_CONTROL, 0);
+
+       DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+       drm_mode_debug_printmodeline(mode);
+
+       REG_WRITE(dpll_reg,
+               (REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+       REG_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150); /* 42 usec w/o calibration, 110 with.  rounded up. */
+
+       if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
+               dev_err(dev->dev, "Failed to get DPLL lock\n");
+               return -EBUSY;
+       }
+
+       {
+               int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+               REG_WRITE(dpll_md_reg,
+                         (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
+                         ((sdvo_pixel_multiply - 1) <<
+                                        DPLL_MD_UDI_MULTIPLIER_SHIFT));
+       }
+
+       REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+                 ((adjusted_mode->crtc_htotal - 1) << 16));
+       REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+                 ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+                 ((adjusted_mode->crtc_hsync_end - 1) << 16));
+       REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+                 ((adjusted_mode->crtc_vtotal - 1) << 16));
+       REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+                 ((adjusted_mode->crtc_vblank_end - 1) << 16));
+       REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+                 ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       /* pipesrc and dspsize control the size that is scaled from,
+        * which should always be the user's requested size.
+        */
+       REG_WRITE(dspsize_reg,
+                 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+       REG_WRITE(dsppos_reg, 0);
+       REG_WRITE(pipesrc_reg,
+                 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+       REG_WRITE(pipeconf_reg, pipeconf);
+       REG_READ(pipeconf_reg);
+
+       cdv_intel_wait_for_vblank(dev);
+
+       REG_WRITE(dspcntr_reg, dspcntr);
+
+       /* Flush the plane changes */
+       {
+               struct drm_crtc_helper_funcs *crtc_funcs =
+                   crtc->helper_private;
+               crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+       }
+
+       cdv_intel_wait_for_vblank(dev);
+
+       return 0;
+}
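
All of the CRTC timing registers written above use the same packing: the low 16 bits hold the "minus one" active/start value and the high 16 bits the "minus one" total/end value. A standalone sketch of that packing for one example mode (the 1280/1688 numbers are only an illustration):

    /* Packing sketch for the CRTC timing registers: low half holds
     * (active - 1), high half (total - 1). */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int hdisplay = 1280, htotal = 1688;   /* example mode */
            uint32_t htot = (hdisplay - 1) | ((htotal - 1) << 16);

            printf("HTOTAL word = 0x%08x\n", htot);        /* 0x069704ff */
            return 0;
    }
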
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_psb_private *dev_priv =
+                               (struct drm_psb_private *)dev->dev_private;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int palreg = PALETTE_A;
+       int i;
+
+       /* The clocks have to be on to load the palette. */
+       if (!crtc->enabled)
+               return;
+
+       switch (psb_intel_crtc->pipe) {
+       case 0:
+               break;
+       case 1:
+               palreg = PALETTE_B;
+               break;
+       case 2:
+               palreg = PALETTE_C;
+               break;
+       default:
+               dev_err(dev->dev, "Illegal Pipe Number.\n");
+               return;
+       }
+
+       if (gma_power_begin(dev, false)) {
+               for (i = 0; i < 256; i++) {
+                       REG_WRITE(palreg + 4 * i,
+                                 ((psb_intel_crtc->lut_r[i] +
+                                 psb_intel_crtc->lut_adj[i]) << 16) |
+                                 ((psb_intel_crtc->lut_g[i] +
+                                 psb_intel_crtc->lut_adj[i]) << 8) |
+                                 (psb_intel_crtc->lut_b[i] +
+                                 psb_intel_crtc->lut_adj[i]));
+               }
+               gma_power_end(dev);
+       } else {
+               for (i = 0; i < 256; i++) {
+                       dev_priv->save_palette_a[i] =
+                                 ((psb_intel_crtc->lut_r[i] +
+                                 psb_intel_crtc->lut_adj[i]) << 16) |
+                                 ((psb_intel_crtc->lut_g[i] +
+                                 psb_intel_crtc->lut_adj[i]) << 8) |
+                                 (psb_intel_crtc->lut_b[i] +
+                                 psb_intel_crtc->lut_adj[i]);
+               }
+
+       }
+}
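
Each palette entry written by the loop above is a single 32-bit word with the adjusted red value in bits 23:16, green in 15:8 and blue in 7:0. A quick sketch of that packing (the colour values are arbitrary):

    /* Sketch of one palette entry as written by the LUT loader above:
     * 8-bit R/G/B packed into one 32-bit word, each offset by lut_adj. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int r = 0x40, g = 0x80, b = 0xc0, adj = 0; /* arbitrary */
            uint32_t entry = ((r + adj) << 16) | ((g + adj) << 8) | (b + adj);

            printf("palette entry = 0x%06x\n", entry);   /* 0x4080c0 */
            return 0;
    }
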
+
+/**
+ * Save HW states of the given crtc
+ */
+static void cdv_intel_crtc_save(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       /* struct drm_psb_private *dev_priv =
+                       (struct drm_psb_private *)dev->dev_private; */
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+       int pipeA = (psb_intel_crtc->pipe == 0);
+       uint32_t paletteReg;
+       int i;
+
+       if (!crtc_state) {
+               dev_dbg(dev->dev, "No CRTC state found\n");
+               return;
+       }
+
+       crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+       crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+       crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+       crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+       crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+       crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+       crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+       crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+       crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+       crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+       crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+       crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+       crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+
+       /* NOTE: DSPSIZE and DSPPOS are only for psb */
+       crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+       crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+
+       crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+
+       DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+                       crtc_state->saveDSPCNTR,
+                       crtc_state->savePIPECONF,
+                       crtc_state->savePIPESRC,
+                       crtc_state->saveFP0,
+                       crtc_state->saveFP1,
+                       crtc_state->saveDPLL,
+                       crtc_state->saveHTOTAL,
+                       crtc_state->saveHBLANK,
+                       crtc_state->saveHSYNC,
+                       crtc_state->saveVTOTAL,
+                       crtc_state->saveVBLANK,
+                       crtc_state->saveVSYNC,
+                       crtc_state->saveDSPSTRIDE,
+                       crtc_state->saveDSPSIZE,
+                       crtc_state->saveDSPPOS,
+                       crtc_state->saveDSPBASE
+               );
+
+       paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+       for (i = 0; i < 256; ++i)
+               crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * Restore HW states of the given crtc
+ */
+static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       /* struct drm_psb_private * dev_priv =
+                               (struct drm_psb_private *)dev->dev_private; */
+       struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
+       struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+       /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
+       int pipeA = (psb_intel_crtc->pipe == 0);
+       uint32_t paletteReg;
+       int i;
+
+       if (!crtc_state) {
+               dev_dbg(dev->dev, "No crtc state\n");
+               return;
+       }
+
+       DRM_DEBUG(
+               "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+               REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
+               REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
+               REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
+               REG_READ(pipeA ? FPA0 : FPB0),
+               REG_READ(pipeA ? FPA1 : FPB1),
+               REG_READ(pipeA ? DPLL_A : DPLL_B),
+               REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
+               REG_READ(pipeA ? HBLANK_A : HBLANK_B),
+               REG_READ(pipeA ? HSYNC_A : HSYNC_B),
+               REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
+               REG_READ(pipeA ? VBLANK_A : VBLANK_B),
+               REG_READ(pipeA ? VSYNC_A : VSYNC_B),
+               REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
+               REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
+               REG_READ(pipeA ? DSPAPOS : DSPBPOS),
+               REG_READ(pipeA ? DSPABASE : DSPBBASE)
+               );
+
+       DRM_DEBUG(
+               "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+               crtc_state->saveDSPCNTR,
+               crtc_state->savePIPECONF,
+               crtc_state->savePIPESRC,
+               crtc_state->saveFP0,
+               crtc_state->saveFP1,
+               crtc_state->saveDPLL,
+               crtc_state->saveHTOTAL,
+               crtc_state->saveHBLANK,
+               crtc_state->saveHSYNC,
+               crtc_state->saveVTOTAL,
+               crtc_state->saveVBLANK,
+               crtc_state->saveVSYNC,
+               crtc_state->saveDSPSTRIDE,
+               crtc_state->saveDSPSIZE,
+               crtc_state->saveDSPPOS,
+               crtc_state->saveDSPBASE
+               );
+
+
+       if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+               REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+                       crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+               REG_READ(pipeA ? DPLL_A : DPLL_B);
+               DRM_DEBUG("write dpll: %x\n",
+                               REG_READ(pipeA ? DPLL_A : DPLL_B));
+               udelay(150);
+       }
+
+       REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
+       REG_READ(pipeA ? FPA0 : FPB0);
+
+       REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
+       REG_READ(pipeA ? FPA1 : FPB1);
+
+       REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
+       REG_READ(pipeA ? DPLL_A : DPLL_B);
+       udelay(150);
+
+       REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
+       REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
+       REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
+       REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
+       REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
+       REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
+       REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+
+       REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
+       REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+
+       REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
+       REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+       REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+
+       cdv_intel_wait_for_vblank(dev);
+
+       REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
+       REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+
+       cdv_intel_wait_for_vblank(dev);
+
+       paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+       for (i = 0; i < 256; ++i)
+               REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+}
+
+static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
+                                struct drm_file *file_priv,
+                                uint32_t handle,
+                                uint32_t width, uint32_t height)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+       uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+       uint32_t temp;
+       size_t addr = 0;
+       struct gtt_range *gt;
+       struct drm_gem_object *obj;
+       int ret;
+
+       /* If we want to turn off the cursor, ignore width and height */
+       if (!handle) {
+               /* turn off the cursor */
+               temp = CURSOR_MODE_DISABLE;
+
+               if (gma_power_begin(dev, false)) {
+                       REG_WRITE(control, temp);
+                       REG_WRITE(base, 0);
+                       gma_power_end(dev);
+               }
+
+               /* unpin the old GEM object */
+               if (psb_intel_crtc->cursor_obj) {
+                       gt = container_of(psb_intel_crtc->cursor_obj,
+                                                       struct gtt_range, gem);
+                       psb_gtt_unpin(gt);
+                       drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+                       psb_intel_crtc->cursor_obj = NULL;
+               }
+
+               return 0;
+       }
+
+       /* Currently we only support 64x64 cursors */
+       if (width != 64 || height != 64) {
+               dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+               return -EINVAL;
+       }
+
+       obj = drm_gem_object_lookup(dev, file_priv, handle);
+       if (!obj)
+               return -ENOENT;
+
+       if (obj->size < width * height * 4) {
+               dev_dbg(dev->dev, "buffer is too small\n");
+               return -ENOMEM;
+       }
+
+       gt = container_of(obj, struct gtt_range, gem);
+
+       /* Pin the memory into the GTT */
+       ret = psb_gtt_pin(gt);
+       if (ret) {
+               dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+               return ret;
+       }
+
+       addr = gt->offset;      /* Or resource.start ??? */
+
+       psb_intel_crtc->cursor_addr = addr;
+
+       temp = 0;
+       /* set the pipe for the cursor */
+       temp |= (pipe << 28);
+       temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+       if (gma_power_begin(dev, false)) {
+               REG_WRITE(control, temp);
+               REG_WRITE(base, addr);
+               gma_power_end(dev);
+       }
+
+       /* unpin the old GEM object */
+       if (psb_intel_crtc->cursor_obj) {
+               gt = container_of(psb_intel_crtc->cursor_obj,
+                                                       struct gtt_range, gem);
+               psb_gtt_unpin(gt);
+               drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+               psb_intel_crtc->cursor_obj = obj;
+       }
+       return 0;
+}
+
+static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       uint32_t temp = 0;
+       uint32_t adder;
+
+
+       if (x < 0) {
+               temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+               x = -x;
+       }
+       if (y < 0) {
+               temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+               y = -y;
+       }
+
+       temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+       temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+       adder = psb_intel_crtc->cursor_addr;
+
+       if (gma_power_begin(dev, false)) {
+               REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+               REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+               gma_power_end(dev);
+       }
+       return 0;
+}
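
The cursor position register written above uses a sign-magnitude encoding per axis: a sign flag plus the absolute coordinate, with X in the low half of the word and Y in the high half. The sketch below mirrors that encoding with locally defined stand-ins for the CURSOR_* constants (the values are assumptions for illustration; the real definitions live in psb_intel_reg.h):

    /* Sketch of the sign-magnitude cursor position encoding; the
     * shift/mask values are local stand-ins, not the driver's. */
    #include <stdio.h>
    #include <stdint.h>

    #define POS_SIGN  0x8000
    #define POS_MASK  0x07ff
    #define X_SHIFT   0
    #define Y_SHIFT   16

    static uint32_t encode_cursor_pos(int x, int y)
    {
            uint32_t temp = 0;

            if (x < 0) {
                    temp |= POS_SIGN << X_SHIFT;
                    x = -x;
            }
            if (y < 0) {
                    temp |= POS_SIGN << Y_SHIFT;
                    y = -y;
            }
            temp |= (x & POS_MASK) << X_SHIFT;
            temp |= (y & POS_MASK) << Y_SHIFT;
            return temp;
    }

    int main(void)
    {
            /* (-10, 20) -> 0x0014800a with the stand-in layout above */
            printf("0x%08x\n", encode_cursor_pos(-10, 20));
            return 0;
    }
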
+
+static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+                        u16 *green, u16 *blue, uint32_t start, uint32_t size)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int i;
+       int end = (start + size > 256) ? 256 : start + size;
+
+       for (i = start; i < end; i++) {
+               psb_intel_crtc->lut_r[i] = red[i] >> 8;
+               psb_intel_crtc->lut_g[i] = green[i] >> 8;
+               psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+       }
+
+       cdv_intel_crtc_load_lut(crtc);
+}
+
+static int cdv_crtc_set_config(struct drm_mode_set *set)
+{
+       int ret = 0;
+       struct drm_device *dev = set->crtc->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (!dev_priv->rpm_enabled)
+               return drm_crtc_helper_set_config(set);
+
+       pm_runtime_forbid(&dev->pdev->dev);
+
+       ret = drm_crtc_helper_set_config(set);
+
+       pm_runtime_allow(&dev->pdev->dev);
+
+       return ret;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+/* FIXME: why are we using this, should it be cdv_ in this tree ? */
+
+static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
+{
+       clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = refclk * clock->m / (clock->n + 2);
+       clock->dot = clock->vco / clock->p;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int cdv_intel_crtc_clock_get(struct drm_device *dev,
+                               struct drm_crtc *crtc)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       u32 dpll;
+       u32 fp;
+       struct cdv_intel_clock_t clock;
+       bool is_lvds;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (gma_power_begin(dev, false)) {
+               dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+               if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+                       fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+               else
+                       fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+               is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+               gma_power_end(dev);
+       } else {
+               dpll = (pipe == 0) ?
+                       dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+
+               if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+                       fp = (pipe == 0) ?
+                               dev_priv->saveFPA0 :
+                               dev_priv->saveFPB0;
+               else
+                       fp = (pipe == 0) ?
+                               dev_priv->saveFPA1 :
+                               dev_priv->saveFPB1;
+
+               is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+       }
+
+       clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+       clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+       clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
+       if (is_lvds) {
+               clock.p1 =
+                   ffs((dpll &
+                        DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+                       DPLL_FPA01_P1_POST_DIV_SHIFT);
+               if (clock.p1 == 0) {
+                       clock.p1 = 4;
+                       dev_err(dev->dev, "PLL %d\n", dpll);
+               }
+               clock.p2 = 14;
+
+               if ((dpll & PLL_REF_INPUT_MASK) ==
+                   PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+                       /* XXX: might not be 66MHz */
+                       i8xx_clock(66000, &clock);
+               } else
+                       i8xx_clock(48000, &clock);
+       } else {
+               if (dpll & PLL_P1_DIVIDE_BY_TWO)
+                       clock.p1 = 2;
+               else {
+                       clock.p1 =
+                           ((dpll &
+                             DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+               }
+               if (dpll & PLL_P2_DIVIDE_BY_4)
+                       clock.p2 = 4;
+               else
+                       clock.p2 = 2;
+
+               i8xx_clock(48000, &clock);
+       }
+
+       /* XXX: It would be nice to validate the clocks, but we can't reuse
+        * i830PllIsValid() because it relies on the xf86_config connector
+        * configuration being accurate, which it isn't necessarily.
+        */
+
+       return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+                                            struct drm_crtc *crtc)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       struct drm_display_mode *mode;
+       int htot;
+       int hsync;
+       int vtot;
+       int vsync;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (gma_power_begin(dev, false)) {
+               htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+               hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+               vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+               vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+               gma_power_end(dev);
+       } else {
+               htot = (pipe == 0) ?
+                       dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+               hsync = (pipe == 0) ?
+                       dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+               vtot = (pipe == 0) ?
+                       dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+               vsync = (pipe == 0) ?
+                       dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+       }
+
+       mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+       if (!mode)
+               return NULL;
+
+       mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
+       mode->hdisplay = (htot & 0xffff) + 1;
+       mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+       mode->hsync_start = (hsync & 0xffff) + 1;
+       mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+       mode->vdisplay = (vtot & 0xffff) + 1;
+       mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+       mode->vsync_start = (vsync & 0xffff) + 1;
+       mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+       drm_mode_set_name(mode);
+       drm_mode_set_crtcinfo(mode, 0);
+
+       return mode;
+}
+
+static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+       kfree(psb_intel_crtc->crtc_state);
+       drm_crtc_cleanup(crtc);
+       kfree(psb_intel_crtc);
+}
+
+const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
+       .dpms = cdv_intel_crtc_dpms,
+       .mode_fixup = cdv_intel_crtc_mode_fixup,
+       .mode_set = cdv_intel_crtc_mode_set,
+       .mode_set_base = cdv_intel_pipe_set_base,
+       .prepare = cdv_intel_crtc_prepare,
+       .commit = cdv_intel_crtc_commit,
+};
+
+const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
+       .save = cdv_intel_crtc_save,
+       .restore = cdv_intel_crtc_restore,
+       .cursor_set = cdv_intel_crtc_cursor_set,
+       .cursor_move = cdv_intel_crtc_cursor_move,
+       .gamma_set = cdv_intel_crtc_gamma_set,
+       .set_config = cdv_crtc_set_config,
+       .destroy = cdv_intel_crtc_destroy,
+};
+
+/*
+ * Set the default value of cursor control and base register
+ * to zero. This is a workaround for h/w defect on oaktrail
+ */
+void cdv_intel_cursor_init(struct drm_device *dev, int pipe)
+{
+       uint32_t control;
+       uint32_t base;
+
+       switch (pipe) {
+       case 0:
+               control = CURACNTR;
+               base = CURABASE;
+               break;
+       case 1:
+               control = CURBCNTR;
+               base = CURBBASE;
+               break;
+       case 2:
+               control = CURCCNTR;
+               base = CURCBASE;
+               break;
+       default:
+               return;
+       }
+
+       REG_WRITE(control, 0);
+       REG_WRITE(base, 0);
+}
+
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
new file mode 100644 (file)
index 0000000..50d7cfb
--- /dev/null
@@ -0,0 +1,394 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     jim liu <jim.liu@intel.com>
+ *
+ * FIXME:
+ *     We should probably make this generic and share it with Medfield
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "psb_intel_drv.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include <linux/pm_runtime.h>
+
+/* hdmi control bits */
+#define HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9)
+#define HDMI_BORDER_ENABLE             (1 << 7)
+#define HDMI_AUDIO_ENABLE              (1 << 6)
+#define HDMI_VSYNC_ACTIVE_HIGH         (1 << 4)
+#define HDMI_HSYNC_ACTIVE_HIGH         (1 << 3)
+/* hdmi-b control bits */
+#define        HDMIB_PIPE_B_SELECT             (1 << 30)
+
+
+struct mid_intel_hdmi_priv {
+       u32 hdmi_reg;
+       u32 save_HDMIB;
+       bool has_hdmi_sink;
+       bool has_hdmi_audio;
+       /* Should be set when a hotplug event is detected */
+       bool hdmi_device_connected;
+       struct mdfld_hdmi_i2c *i2c_bus;
+       struct i2c_adapter *hdmi_i2c_adapter;   /* for control functions */
+       struct drm_device *dev;
+};
+
+static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
+                       struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder);
+       struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+       u32 hdmib;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+
+       hdmib = (2 << 10);
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+               hdmib |= HDMI_VSYNC_ACTIVE_HIGH;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+               hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
+
+       if (intel_crtc->pipe == 1)
+               hdmib |= HDMIB_PIPE_B_SELECT;
+
+       if (hdmi_priv->has_hdmi_audio) {
+               hdmib |= HDMI_AUDIO_ENABLE;
+               hdmib |= HDMI_NULL_PACKETS_DURING_VSYNC;
+       }
+
+       REG_WRITE(hdmi_priv->hdmi_reg, hdmib);
+       REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                               to_psb_intel_encoder(encoder);
+       struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+       u32 hdmib;
+
+       hdmib = REG_READ(hdmi_priv->hdmi_reg);
+
+       if (mode != DRM_MODE_DPMS_ON)
+               REG_WRITE(hdmi_priv->hdmi_reg, hdmib & ~HDMIB_PORT_EN);
+       else
+               REG_WRITE(hdmi_priv->hdmi_reg, hdmib | HDMIB_PORT_EN);
+       REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_save(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+
+       hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_restore(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+
+       REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
+       REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static enum drm_connector_status cdv_hdmi_detect(
+                               struct drm_connector *connector, bool force)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_connector *psb_intel_connector =
+                                       to_psb_intel_connector(connector);
+       struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+       struct edid *edid = NULL;
+       enum drm_connector_status status = connector_status_disconnected;
+
+       edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+
+       hdmi_priv->has_hdmi_sink = false;
+       hdmi_priv->has_hdmi_audio = false;
+       if (edid) {
+               if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+                       status = connector_status_connected;
+                       hdmi_priv->has_hdmi_sink =
+                                               drm_detect_hdmi_monitor(edid);
+                       hdmi_priv->has_hdmi_audio =
+                                               drm_detect_monitor_audio(edid);
+               }
+
+               psb_intel_connector->base.display_info.raw_edid = NULL;
+               kfree(edid);
+       }
+       return status;
+}
+
+static int cdv_hdmi_set_property(struct drm_connector *connector,
+                                      struct drm_property *property,
+                                      uint64_t value)
+{
+       struct drm_encoder *encoder = connector->encoder;
+
+       if (!strcmp(property->name, "scaling mode") && encoder) {
+               struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
+               bool centre;
+               uint64_t curValue;
+
+               if (!crtc)
+                       return -1;
+
+               switch (value) {
+               case DRM_MODE_SCALE_FULLSCREEN:
+                       break;
+               case DRM_MODE_SCALE_NO_SCALE:
+                       break;
+               case DRM_MODE_SCALE_ASPECT:
+                       break;
+               default:
+                       return -1;
+               }
+
+               if (drm_connector_property_get_value(connector,
+                                                       property, &curValue))
+                       return -1;
+
+               if (curValue == value)
+                       return 0;
+
+               if (drm_connector_property_set_value(connector,
+                                                       property, value))
+                       return -1;
+
+               centre = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
+                       (value == DRM_MODE_SCALE_NO_SCALE);
+
+               if (crtc->saved_mode.hdisplay != 0 &&
+                   crtc->saved_mode.vdisplay != 0) {
+                       if (centre) {
+                               if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
+                                           encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
+                                       return -1;
+                       } else {
+                               struct drm_encoder_helper_funcs *helpers
+                                                   = encoder->helper_private;
+                               helpers->mode_set(encoder, &crtc->saved_mode,
+                                            &crtc->saved_adjusted_mode);
+                       }
+               }
+       }
+       return 0;
+}
+
+/*
+ * Return the list of HDMI DDC modes if available.
+ */
+static int cdv_hdmi_get_modes(struct drm_connector *connector)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct edid *edid = NULL;
+       int ret = 0;
+
+       edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+       if (edid) {
+               drm_mode_connector_update_edid_property(connector, edid);
+               ret = drm_add_edid_modes(connector, edid);
+               kfree(edid);
+       }
+       return ret;
+}
+
+static int cdv_hdmi_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+{
+
+       if (mode->clock > 165000)
+               return MODE_CLOCK_HIGH;
+       if (mode->clock < 20000)
+               return MODE_CLOCK_LOW;
+
+       /* just in case */
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       /* just in case */
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               return MODE_NO_INTERLACE;
+
+       /*
+        * FIXME: for now we limit the size to 1680x1050 on CDV, otherwise it
+        * will go beyond the stolen memory size allocated to the framebuffer
+        */
+       if (mode->hdisplay > 1680)
+               return MODE_PANEL;
+       if (mode->vdisplay > 1050)
+               return MODE_PANEL;
+       return MODE_OK;
+}
+
+static void cdv_hdmi_destroy(struct drm_connector *connector)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+
+       if (psb_intel_encoder->i2c_bus)
+               psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
+       .dpms = cdv_hdmi_dpms,
+       .mode_fixup = cdv_hdmi_mode_fixup,
+       .prepare = psb_intel_encoder_prepare,
+       .mode_set = cdv_hdmi_mode_set,
+       .commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs
+                                       cdv_hdmi_connector_helper_funcs = {
+       .get_modes = cdv_hdmi_get_modes,
+       .mode_valid = cdv_hdmi_mode_valid,
+       .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .save = cdv_hdmi_save,
+       .restore = cdv_hdmi_restore,
+       .detect = cdv_hdmi_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = cdv_hdmi_set_property,
+       .destroy = cdv_hdmi_destroy,
+};
+
+void cdv_hdmi_init(struct drm_device *dev,
+                       struct psb_intel_mode_device *mode_dev, int reg)
+{
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct psb_intel_connector *psb_intel_connector;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       struct mid_intel_hdmi_priv *hdmi_priv;
+       int ddc_bus;
+
+       psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+                                   GFP_KERNEL);
+
+       if (!psb_intel_encoder)
+               return;
+
+       psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+                                     GFP_KERNEL);
+
+       if (!psb_intel_connector)
+               goto err_connector;
+
+       hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
+
+       if (!hdmi_priv)
+               goto err_priv;
+
+       connector = &psb_intel_connector->base;
+       encoder = &psb_intel_encoder->base;
+       drm_connector_init(dev, connector,
+                          &cdv_hdmi_connector_funcs,
+                          DRM_MODE_CONNECTOR_DVID);
+
+       drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
+                        DRM_MODE_ENCODER_TMDS);
+
+       psb_intel_connector_attach_encoder(psb_intel_connector,
+                                          psb_intel_encoder);
+       psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+       hdmi_priv->hdmi_reg = reg;
+       hdmi_priv->has_hdmi_sink = false;
+       psb_intel_encoder->dev_priv = hdmi_priv;
+
+       drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
+       drm_connector_helper_add(connector,
+                                &cdv_hdmi_connector_helper_funcs);
+       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.scaling_mode_property,
+                                     DRM_MODE_SCALE_FULLSCREEN);
+
+       switch (reg) {
+       case SDVOB:
+               ddc_bus = GPIOE;
+               break;
+       case SDVOC:
+               ddc_bus = GPIOD;
+               break;
+       default:
+               DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
+               goto failed_ddc;
+               break;
+       }
+
+       psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+                               ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
+
+       if (!psb_intel_encoder->i2c_bus) {
+               dev_err(dev->dev, "No ddc adapter available!\n");
+               goto failed_ddc;
+       }
+
+       hdmi_priv->hdmi_i2c_adapter =
+                               &(psb_intel_encoder->i2c_bus->adapter);
+       hdmi_priv->dev = dev;
+       drm_sysfs_connector_add(connector);
+       return;
+
+failed_ddc:
+       drm_encoder_cleanup(encoder);
+       drm_connector_cleanup(connector);
+err_priv:
+       kfree(psb_intel_connector);
+err_connector:
+       kfree(psb_intel_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
new file mode 100644 (file)
index 0000000..50e744b
--- /dev/null
@@ -0,0 +1,732 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ *     Dave Airlie <airlied@linux.ie>
+ *     Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+#include "cdv_device.h"
+
+/**
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE   0x01
+#define BLC_PWM_TYPT   0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ       (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ       (0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR   (10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT    (16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+struct cdv_intel_lvds_priv {
+       /*
+        * Saved LVDS output states
+        */
+       uint32_t savePP_ON;
+       uint32_t savePP_OFF;
+       uint32_t saveLVDS;
+       uint32_t savePP_CONTROL;
+       uint32_t savePP_CYCLE;
+       uint32_t savePFIT_CONTROL;
+       uint32_t savePFIT_PGM_RATIOS;
+       uint32_t saveBLC_PWM_CTL;
+};
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 retval;
+
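+       /* Use the live register if the device is powered, else the saved copy */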
+       if (gma_power_begin(dev, false)) {
+               retval = ((REG_READ(BLC_PWM_CTL) &
+                         BACKLIGHT_MODULATION_FREQ_MASK) >>
+                         BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+               gma_power_end(dev);
+       } else
+               retval = ((dev_priv->saveBLC_PWM_CTL &
+                         BACKLIGHT_MODULATION_FREQ_MASK) >>
+                         BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+       return retval;
+}
+
+/*
+ * Set LVDS backlight level by I2C command
+ */
+static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
+                                       unsigned int level)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+       u8 out_buf[2];
+       unsigned int blc_i2c_brightness;
+
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = lvds_i2c_bus->slave_addr,
+                       .flags = 0,
+                       .len = 2,
+                       .buf = out_buf,
+               }
+       };
+
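+       /* Scale 0..BRIGHTNESS_MAX_LEVEL into the 8-bit value the panel expects */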
+       blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+                            BRIGHTNESS_MASK /
+                            BRIGHTNESS_MAX_LEVEL);
+
+       if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+               blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+       out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+       out_buf[1] = (u8)blc_i2c_brightness;
+
+       if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
+               return 0;
+
+       DRM_ERROR("I2C transfer error\n");
+       return -1;
+}
+
+
+static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       u32 max_pwm_blc;
+       u32 blc_pwm_duty_cycle;
+
+       max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
+
+       /*BLC_PWM_CTL Should be initiated while backlight device init*/
+       BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
+
+       blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+       if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+               blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+       blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
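+       /* The modulation frequency field sits in the upper 16 bits, the duty cycle in the lower */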
+       REG_WRITE(BLC_PWM_CTL,
+                 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+                 (blc_pwm_duty_cycle));
+
+       return 0;
+}
+
+/*
+ * Set LVDS backlight level either by I2C or PWM
+ */
+void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (!dev_priv->lvds_bl) {
+               DRM_ERROR("NO LVDS Backlight Info\n");
+               return;
+       }
+
+       if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+               cdv_lvds_i2c_set_brightness(dev, level);
+       else
+               cdv_lvds_pwm_set_brightness(dev, level);
+}
+
+/**
+ * cdv_intel_lvds_set_backlight - set the backlight level
+ * @dev: drm device
+ * @level: backlight level, from 0 to cdv_intel_lvds_get_max_backlight()
+ */
+static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 blc_pwm_ctl;
+
+       if (gma_power_begin(dev, false)) {
+               blc_pwm_ctl =
+                       REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+               REG_WRITE(BLC_PWM_CTL,
+                               (blc_pwm_ctl |
+                               (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+               gma_power_end(dev);
+       } else {
+               blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+                               ~BACKLIGHT_DUTY_CYCLE_MASK;
+               dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+                                       (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+       }
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+static void cdv_intel_lvds_set_power(struct drm_device *dev,
+                                    struct drm_encoder *encoder, bool on)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pp_status;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       if (on) {
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+                         POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while ((pp_status & PP_ON) == 0);
+
+               cdv_intel_lvds_set_backlight(dev,
+                               dev_priv->mode_dev.backlight_duty_cycle);
+       } else {
+               cdv_intel_lvds_set_backlight(dev, 0);
+
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+                         ~POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while (pp_status & PP_ON);
+       }
+       gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       if (mode == DRM_MODE_DPMS_ON)
+               cdv_intel_lvds_set_power(dev, encoder, true);
+       else
+               cdv_intel_lvds_set_power(dev, encoder, false);
+       /* XXX: We never power down the LVDS pairs. */
+}
+
+static void cdv_intel_lvds_save(struct drm_connector *connector)
+{
+}
+
+static void cdv_intel_lvds_restore(struct drm_connector *connector)
+{
+}
+
+int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
+                             struct drm_display_mode *mode)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_display_mode *fixed_mode =
+                                       dev_priv->mode_dev.panel_fixed_mode;
+
+       /* just in case */
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       /* just in case */
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               return MODE_NO_INTERLACE;
+
+       if (fixed_mode) {
+               if (mode->hdisplay > fixed_mode->hdisplay)
+                       return MODE_PANEL;
+               if (mode->vdisplay > fixed_mode->vdisplay)
+                       return MODE_PANEL;
+       }
+       return MODE_OK;
+}
+
+bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       struct drm_encoder *tmp_encoder;
+       struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+
+       /* Should never happen!! */
+       list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+                           head) {
+               if (tmp_encoder != encoder
+                   && tmp_encoder->crtc == encoder->crtc) {
+                       printk(KERN_ERR "Can't enable LVDS and another "
+                              "encoder on the same pipe\n");
+                       return false;
+               }
+       }
+
+       /*
+        * If we have timings from the BIOS for the panel, put them in
+        * to the adjusted mode.  The CRTC will be set up for this mode,
+        * with the panel scaling set up to source from the H/VDisplay
+        * of the original mode.
+        */
+       if (panel_fixed_mode != NULL) {
+               adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+               adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+               adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+               adjusted_mode->htotal = panel_fixed_mode->htotal;
+               adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+               adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+               adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+               adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+               adjusted_mode->clock = panel_fixed_mode->clock;
+               drm_mode_set_crtcinfo(adjusted_mode,
+                                     CRTC_INTERLACE_HALVE_V);
+       }
+
+       /*
+        * XXX: It would be nice to support lower refresh rates on the
+        * panels to reduce power consumption, and perhaps match the
+        * user's requested refresh rate.
+        */
+
+       return true;
+}
+
+static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
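+       /* Save the current backlight state so commit can restore the duty cycle */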
+       mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+       mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+                                         BACKLIGHT_DUTY_CYCLE_MASK);
+
+       cdv_intel_lvds_set_power(dev, encoder, false);
+
+       gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+       if (mode_dev->backlight_duty_cycle == 0)
+               mode_dev->backlight_duty_cycle =
+                   cdv_intel_lvds_get_max_backlight(dev);
+
+       cdv_intel_lvds_set_power(dev, encoder, true);
+}
+
+static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pfit_control;
+
+       /*
+        * The LVDS pin pair will already have been turned on in the
+        * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
+        * settings.
+        */
+
+       /*
+        * Enable automatic panel scaling so that non-native modes fill the
+        * screen.  Should be enabled before the pipe is enabled, according to
+        * register description and PRM.
+        */
+       if (mode->hdisplay != adjusted_mode->hdisplay ||
+           mode->vdisplay != adjusted_mode->vdisplay)
+               pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+                               HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+                               HORIZ_INTERP_BILINEAR);
+       else
+               pfit_control = 0;
+
+       if (dev_priv->lvds_dither)
+               pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+       REG_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * This always returns CONNECTOR_STATUS_CONNECTED. This connector should
+ * only have been set up if the LVDS was actually connected anyway.
+ */
+static enum drm_connector_status cdv_intel_lvds_detect(
+                               struct drm_connector *connector, bool force)
+{
+       return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       int ret;
+
+       ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter);
+
+       if (ret)
+               return ret;
+
+       /*
+        * Didn't get an EDID, so set wide sync ranges so we get all modes
+        * handed to valid_mode for checking.
+        */
+       connector->display_info.min_vfreq = 0;
+       connector->display_info.max_vfreq = 200;
+       connector->display_info.min_hfreq = 0;
+       connector->display_info.max_hfreq = 200;
+       if (mode_dev->panel_fixed_mode != NULL) {
+               struct drm_display_mode *mode =
+                   drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+               drm_mode_probed_add(connector, mode);
+               return 1;
+       }
+
+       return 0;
+}
+
+/**
+ * cdv_intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+void cdv_intel_lvds_destroy(struct drm_connector *connector)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+
+       if (psb_intel_encoder->i2c_bus)
+               psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+int cdv_intel_lvds_set_property(struct drm_connector *connector,
+                                      struct drm_property *property,
+                                      uint64_t value)
+{
+       struct drm_encoder *encoder = connector->encoder;
+
+       if (!strcmp(property->name, "scaling mode") && encoder) {
+               struct psb_intel_crtc *crtc =
+                                       to_psb_intel_crtc(encoder->crtc);
+               uint64_t curValue;
+
+               if (!crtc)
+                       return -1;
+
+               switch (value) {
+               case DRM_MODE_SCALE_FULLSCREEN:
+                       break;
+               case DRM_MODE_SCALE_NO_SCALE:
+                       break;
+               case DRM_MODE_SCALE_ASPECT:
+                       break;
+               default:
+                       return -1;
+               }
+
+               if (drm_connector_property_get_value(connector,
+                                                    property,
+                                                    &curValue))
+                       return -1;
+
+               if (curValue == value)
+                       return 0;
+
+               if (drm_connector_property_set_value(connector,
+                                                       property,
+                                                       value))
+                       return -1;
+
+               if (crtc->saved_mode.hdisplay != 0 &&
+                   crtc->saved_mode.vdisplay != 0) {
+                       if (!drm_crtc_helper_set_mode(encoder->crtc,
+                                                     &crtc->saved_mode,
+                                                     encoder->crtc->x,
+                                                     encoder->crtc->y,
+                                                     encoder->crtc->fb))
+                               return -1;
+               }
+       } else if (!strcmp(property->name, "backlight") && encoder) {
+               if (drm_connector_property_set_value(connector,
+                                                       property,
+                                                       value))
+                       return -1;
+               else {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+                       struct drm_psb_private *dev_priv =
+                                               encoder->dev->dev_private;
+                       struct backlight_device *bd =
+                                               dev_priv->backlight_device;
+                       bd->props.brightness = value;
+                       backlight_update_status(bd);
+#endif
+               }
+       } else if (!strcmp(property->name, "DPMS") && encoder) {
+               struct drm_encoder_helper_funcs *helpers =
+                                       encoder->helper_private;
+               helpers->dpms(encoder, value);
+       }
+       return 0;
+}
+
+static const struct drm_encoder_helper_funcs
+                                       cdv_intel_lvds_helper_funcs = {
+       .dpms = cdv_intel_lvds_encoder_dpms,
+       .mode_fixup = cdv_intel_lvds_mode_fixup,
+       .prepare = cdv_intel_lvds_prepare,
+       .mode_set = cdv_intel_lvds_mode_set,
+       .commit = cdv_intel_lvds_commit,
+};
+
+static const struct drm_connector_helper_funcs
+                               cdv_intel_lvds_connector_helper_funcs = {
+       .get_modes = cdv_intel_lvds_get_modes,
+       .mode_valid = cdv_intel_lvds_mode_valid,
+       .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .save = cdv_intel_lvds_save,
+       .restore = cdv_intel_lvds_restore,
+       .detect = cdv_intel_lvds_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = cdv_intel_lvds_set_property,
+       .destroy = cdv_intel_lvds_destroy,
+};
+
+
+static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
+       .destroy = cdv_intel_lvds_enc_destroy,
+};
+
+/**
+ * cdv_intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ * @mode_dev: PSB mode device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void cdv_intel_lvds_init(struct drm_device *dev,
+                    struct psb_intel_mode_device *mode_dev)
+{
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct psb_intel_connector *psb_intel_connector;
+       struct cdv_intel_lvds_priv *lvds_priv;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       struct drm_display_mode *scan;
+       struct drm_crtc *crtc;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 lvds;
+       int pipe;
+
+       psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+                                   GFP_KERNEL);
+       if (!psb_intel_encoder)
+               return;
+
+       psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+                                     GFP_KERNEL);
+       if (!psb_intel_connector)
+               goto failed_connector;
+
+       lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
+       if (!lvds_priv)
+               goto failed_lvds_priv;
+
+       psb_intel_encoder->dev_priv = lvds_priv;
+
+       connector = &psb_intel_connector->base;
+       encoder = &psb_intel_encoder->base;
+
+
+       drm_connector_init(dev, connector,
+                          &cdv_intel_lvds_connector_funcs,
+                          DRM_MODE_CONNECTOR_LVDS);
+
+       drm_encoder_init(dev, encoder,
+                        &cdv_intel_lvds_enc_funcs,
+                        DRM_MODE_ENCODER_LVDS);
+
+
+       psb_intel_connector_attach_encoder(psb_intel_connector,
+                                          psb_intel_encoder);
+       psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+       drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
+       drm_connector_helper_add(connector,
+                                &cdv_intel_lvds_connector_helper_funcs);
+       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+
+       /*Attach connector properties*/
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.scaling_mode_property,
+                                     DRM_MODE_SCALE_FULLSCREEN);
+       drm_connector_attach_property(connector,
+                                     dev_priv->backlight_property,
+                                     BRIGHTNESS_MAX_LEVEL);
+
+       /*
+        * Set up the I2C bus
+        * FIXME: destroy i2c_bus on exit
+        */
+       psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+                                                        GPIOB,
+                                                        "LVDSBLC_B");
+       if (!psb_intel_encoder->i2c_bus) {
+               dev_printk(KERN_ERR,
+                       &dev->pdev->dev, "I2C bus registration failed.\n");
+               goto failed_blc_i2c;
+       }
+       psb_intel_encoder->i2c_bus->slave_addr = 0x2C;
+       dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus;
+
+       /*
+        * LVDS discovery:
+        * 1) check for EDID on DDC
+        * 2) check for VBT data
+        * 3) check to see if LVDS is already on
+        *    if none of the above, no panel
+        * 4) make sure lid is open
+        *    if closed, act like it's not there for now
+        */
+
+       /* Set up the DDC bus. */
+       psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+                                                        GPIOC,
+                                                        "LVDSDDC_C");
+       if (!psb_intel_encoder->ddc_bus) {
+               dev_printk(KERN_ERR, &dev->pdev->dev,
+                          "DDC bus registration failed.\n");
+               goto failed_ddc;
+       }
+
+       /*
+        * Attempt to get the fixed panel mode from DDC.  Assume that the
+        * preferred mode is the right one.
+        */
+       psb_intel_ddc_get_modes(connector,
+                               &psb_intel_encoder->ddc_bus->adapter);
+       list_for_each_entry(scan, &connector->probed_modes, head) {
+               if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+                       mode_dev->panel_fixed_mode =
+                           drm_mode_duplicate(dev, scan);
+                       goto out;       /* FIXME: check for quirks */
+               }
+       }
+
+       /* Failed to get EDID, what about VBT? do we need this?*/
+       if (dev_priv->lfp_lvds_vbt_mode) {
+               mode_dev->panel_fixed_mode =
+                       drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+               if (mode_dev->panel_fixed_mode) {
+                       mode_dev->panel_fixed_mode->type |=
+                               DRM_MODE_TYPE_PREFERRED;
+                       goto out;       /* FIXME: check for quirks */
+               }
+       }
+       /*
+        * If we didn't get EDID, try checking if the panel is already turned
+        * on.  If so, assume that whatever is currently programmed is the
+        * correct mode.
+        */
+       lvds = REG_READ(LVDS);
+       pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+       crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+       if (crtc && (lvds & LVDS_PORT_EN)) {
+               mode_dev->panel_fixed_mode =
+                   cdv_intel_crtc_mode_get(dev, crtc);
+               if (mode_dev->panel_fixed_mode) {
+                       mode_dev->panel_fixed_mode->type |=
+                           DRM_MODE_TYPE_PREFERRED;
+                       goto out;       /* FIXME: check for quirks */
+               }
+       }
+
+       /* If we still don't have a mode after all that, give up. */
+       if (!mode_dev->panel_fixed_mode) {
+               DRM_DEBUG("Found no modes on the lvds, ignoring the LVDS\n");
+               goto failed_find;
+       }
+
+out:
+       drm_sysfs_connector_add(connector);
+       return;
+
+failed_find:
+       printk(KERN_ERR "Failed find\n");
+       if (psb_intel_encoder->ddc_bus)
+               psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+failed_ddc:
+       printk(KERN_ERR "Failed DDC\n");
+       if (psb_intel_encoder->i2c_bus)
+               psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+failed_blc_i2c:
+       printk(KERN_ERR "Failed BLC\n");
+       drm_encoder_cleanup(encoder);
+       drm_connector_cleanup(connector);
+       kfree(lvds_priv);
+failed_lvds_priv:
+       kfree(psb_intel_connector);
+failed_connector:
+       kfree(psb_intel_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
new file mode 100644 (file)
index 0000000..791c0ef
--- /dev/null
@@ -0,0 +1,831 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "framebuffer.h"
+#include "gtt.h"
+
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+                                             struct drm_file *file_priv,
+                                             unsigned int *handle);
+
+static const struct drm_framebuffer_funcs psb_fb_funcs = {
+       .destroy = psb_user_framebuffer_destroy,
+       .create_handle = psb_user_framebuffer_create_handle,
+};
+
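+/* Scale a 16-bit colour component down to a hardware field of the given width */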
+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
+
+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+                          unsigned blue, unsigned transp,
+                          struct fb_info *info)
+{
+       struct psb_fbdev *fbdev = info->par;
+       struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+       uint32_t v;
+
+       if (!fb)
+               return -ENOMEM;
+
+       if (regno > 255)
+               return 1;
+
+       red = CMAP_TOHW(red, info->var.red.length);
+       blue = CMAP_TOHW(blue, info->var.blue.length);
+       green = CMAP_TOHW(green, info->var.green.length);
+       transp = CMAP_TOHW(transp, info->var.transp.length);
+
+       v = (red << info->var.red.offset) |
+           (green << info->var.green.offset) |
+           (blue << info->var.blue.offset) |
+           (transp << info->var.transp.offset);
+
+       if (regno < 16) {
+               switch (fb->bits_per_pixel) {
+               case 16:
+                       ((uint32_t *) info->pseudo_palette)[regno] = v;
+                       break;
+               case 24:
+               case 32:
+                       ((uint32_t *) info->pseudo_palette)[regno] = v;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       struct psb_fbdev *fbdev = info->par;
+       struct psb_framebuffer *psbfb = &fbdev->pfb;
+       struct drm_device *dev = psbfb->base.dev;
+
+       /*
+        *      We have to poke our nose in here. The core fb code assumes
+        *      panning is part of the hardware that can be invoked before
+        *      the actual fb is mapped. In our case that isn't quite true.
+        */
+       if (psbfb->gtt->npage) {
+               /* GTT roll shifts in 4K pages, we need to shift the right
+                  number of pages */
+               int pages = info->fix.line_length >> 12;
+               psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+       }
+        return 0;
+}
+
+void psbfb_suspend(struct drm_device *dev)
+{
+       struct drm_framebuffer *fb;
+
+       console_lock();
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+               struct psb_framebuffer *psbfb = to_psb_fb(fb);
+               struct fb_info *info = psbfb->fbdev;
+
+               fb_set_suspend(info, 1);
+               drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+       console_unlock();
+}
+
+void psbfb_resume(struct drm_device *dev)
+{
+       struct drm_framebuffer *fb;
+
+       console_lock();
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+               struct psb_framebuffer *psbfb = to_psb_fb(fb);
+               struct fb_info *info = psbfb->fbdev;
+
+               fb_set_suspend(info, 0);
+               drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+       console_unlock();
+       drm_helper_disable_unused_functions(dev);
+}
+
+static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct psb_framebuffer *psbfb = vma->vm_private_data;
+       struct drm_device *dev = psbfb->base.dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int page_num;
+       int i;
+       unsigned long address;
+       int ret;
+       unsigned long pfn;
+       /* FIXME: assumes fb at stolen base which may not be true */
+       unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
+
+       page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+       address = (unsigned long)vmf->virtual_address;
+
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
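+       /* Fault in the entire mapping, one page at a time, on the first fault */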
+       for (i = 0; i < page_num; i++) {
+               pfn = (phys_addr >> PAGE_SHIFT);
+
+               ret = vm_insert_mixed(vma, address, pfn);
+               if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+                       break;
+               else if (unlikely(ret != 0)) {
+                       ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+                       return ret;
+               }
+               address += PAGE_SIZE;
+               phys_addr += PAGE_SIZE;
+       }
+       return VM_FAULT_NOPAGE;
+}
+
+static void psbfb_vm_open(struct vm_area_struct *vma)
+{
+}
+
+static void psbfb_vm_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct psbfb_vm_ops = {
+       .fault  = psbfb_vm_fault,
+       .open   = psbfb_vm_open,
+       .close  = psbfb_vm_close
+};
+
+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+       struct psb_fbdev *fbdev = info->par;
+       struct psb_framebuffer *psbfb = &fbdev->pfb;
+
+       if (vma->vm_pgoff != 0)
+               return -EINVAL;
+       if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+               return -EINVAL;
+
+       if (!psbfb->addr_space)
+               psbfb->addr_space = vma->vm_file->f_mapping;
+       /*
+        * If this is a GEM object then info->screen_base is the virtual
+        * kernel remapping of the object. FIXME: Review if this is
+        * suitable for our mmap work
+        */
+       vma->vm_ops = &psbfb_vm_ops;
+       vma->vm_private_data = (void *)psbfb;
+       vma->vm_flags |= VM_RESERVED | VM_IO |
+                                       VM_MIXEDMAP | VM_DONTEXPAND;
+       return 0;
+}
+
+static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
+                                               unsigned long arg)
+{
+       return -ENOTTY;
+}
+
+static struct fb_ops psbfb_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcolreg = psbfb_setcolreg,
+       .fb_fillrect = cfb_fillrect,
+       .fb_copyarea = psbfb_copyarea,
+       .fb_imageblit = cfb_imageblit,
+       .fb_mmap = psbfb_mmap,
+       .fb_sync = psbfb_sync,
+       .fb_ioctl = psbfb_ioctl,
+};
+
+static struct fb_ops psbfb_roll_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcolreg = psbfb_setcolreg,
+       .fb_fillrect = cfb_fillrect,
+       .fb_copyarea = cfb_copyarea,
+       .fb_imageblit = cfb_imageblit,
+       .fb_pan_display = psbfb_pan,
+       .fb_mmap = psbfb_mmap,
+       .fb_sync = psbfb_sync,
+       .fb_ioctl = psbfb_ioctl,
+};
+
+static struct fb_ops psbfb_unaccel_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcolreg = psbfb_setcolreg,
+       .fb_fillrect = cfb_fillrect,
+       .fb_copyarea = cfb_copyarea,
+       .fb_imageblit = cfb_imageblit,
+       .fb_mmap = psbfb_mmap,
+       .fb_ioctl = psbfb_ioctl,
+};
+
+/**
+ *     psb_framebuffer_init    -       initialize a framebuffer
+ *     @dev: our DRM device
+ *     @fb: framebuffer to set up
+ *     @mode_cmd: mode description
+ *     @gt: backing object
+ *
+ *     Configure and fill in the boilerplate for our frame buffer. Return
+ *     0 on success or an error code if we fail.
+ */
+static int psb_framebuffer_init(struct drm_device *dev,
+                                       struct psb_framebuffer *fb,
+                                       struct drm_mode_fb_cmd2 *mode_cmd,
+                                       struct gtt_range *gt)
+{
+       u32 bpp, depth;
+       int ret;
+
+       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
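+       /* The display hardware requires the pitch to be 64 byte aligned */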
+       if (mode_cmd->pitches[0] & 63)
+               return -EINVAL;
+       switch (bpp) {
+       case 8:
+       case 16:
+       case 24:
+       case 32:
+               break;
+       default:
+               return -EINVAL;
+       }
+       ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
+       if (ret) {
+               dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+               return ret;
+       }
+       drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+       fb->gtt = gt;
+       return 0;
+}
+
+/**
+ *     psb_framebuffer_create  -       create a framebuffer backed by gt
+ *     @dev: our DRM device
+ *     @mode_cmd: the description of the requested mode
+ *     @gt: the backing object
+ *
+ *     Create a framebuffer object backed by the gt, and fill in the
+ *     boilerplate required
+ *
+ *     TODO: review object references
+ */
+
+static struct drm_framebuffer *psb_framebuffer_create
+                       (struct drm_device *dev,
+                        struct drm_mode_fb_cmd2 *mode_cmd,
+                        struct gtt_range *gt)
+{
+       struct psb_framebuffer *fb;
+       int ret;
+
+       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+       if (!fb)
+               return ERR_PTR(-ENOMEM);
+
+       ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
+       if (ret) {
+               kfree(fb);
+               return ERR_PTR(ret);
+       }
+       return &fb->base;
+}
+
+/**
+ *     psbfb_alloc             -       allocate frame buffer memory
+ *     @dev: the DRM device
+ *     @aligned_size: space needed
+ *
+ *     Allocate the frame buffer. In the usual case we get a GTT range that
+ *     is stolen memory backed and life is simple. If there isn't sufficient
+ *     stolen memory we fail, as we don't have the virtual mapping space to
+ *     really vmap the object and the kernel console code can't handle non
+ *     linear framebuffers.
+ *
+ *     Re-address this as and if the framebuffer layer grows this ability.
+ */
+static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
+{
+       struct gtt_range *backing;
+       /* Begin by trying to use stolen memory backing */
+       backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
+       if (backing) {
+               if (drm_gem_private_object_init(dev,
+                                       &backing->gem, aligned_size) == 0)
+                       return backing;
+               psb_gtt_free_range(dev, backing);
+       }
+       return NULL;
+}
+
+/**
+ *     psbfb_create            -       create a framebuffer
+ *     @fbdev: the framebuffer device
+ *     @sizes: specification of the layout
+ *
+ *     Create a framebuffer to the specifications provided
+ */
+static int psbfb_create(struct psb_fbdev *fbdev,
+                               struct drm_fb_helper_surface_size *sizes)
+{
+       struct drm_device *dev = fbdev->psb_fb_helper.dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct fb_info *info;
+       struct drm_framebuffer *fb;
+       struct psb_framebuffer *psbfb = &fbdev->pfb;
+       struct drm_mode_fb_cmd2 mode_cmd;
+       struct device *device = &dev->pdev->dev;
+       int size;
+       int ret;
+       struct gtt_range *backing;
+       u32 bpp, depth;
+       int gtt_roll = 0;
+       int pitch_lines = 0;
+
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height;
+       bpp = sizes->surface_bpp;
+
+       /* No 24bit packed */
+       if (bpp == 24)
+               bpp = 32;
+
+       do {
+               /*
+                * Acceleration via the GTT requires pitch to be
+                * power of two aligned. Preferably page but less
+                * is ok with some fonts
+                */
+               mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
+               depth = sizes->surface_depth;
+
+               size = mode_cmd.pitches[0] * mode_cmd.height;
+               size = ALIGN(size, PAGE_SIZE);
+
+               /* Allocate the fb in the GTT with stolen page backing */
+               backing = psbfb_alloc(dev, size);
+
+               if (pitch_lines)
+                       pitch_lines *= 2;
+               else
+                       pitch_lines = 1;
+               gtt_roll++;
+       } while (backing == NULL && pitch_lines <= 16);
+
+       /* The final pitch we accepted if we succeeded */
+       pitch_lines /= 2;
+
+       if (backing == NULL) {
+               /*
+                *      We couldn't get the space we wanted, fall back to the
+                *      display engine requirement instead.  The HW requires
+                *      the pitch to be 64 byte aligned
+                */
+
+               gtt_roll = 0;   /* Don't use GTT accelerated scrolling */
+               pitch_lines = 64;
+
+               mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
+
+               size = mode_cmd.pitches[0] * mode_cmd.height;
+               size = ALIGN(size, PAGE_SIZE);
+
+               /* Allocate the framebuffer in the GTT with stolen page backing */
+               backing = psbfb_alloc(dev, size);
+               if (backing == NULL)
+                       return -ENOMEM;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+
+       info = framebuffer_alloc(0, device);
+       if (!info) {
+               ret = -ENOMEM;
+               goto out_err1;
+       }
+       info->par = fbdev;
+
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+       ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
+       if (ret)
+               goto out_unref;
+
+       fb = &psbfb->base;
+       psbfb->fbdev = info;
+
+       fbdev->psb_fb_helper.fb = fb;
+       fbdev->psb_fb_helper.fbdev = info;
+
+       strcpy(info->fix.id, "psbfb");
+
+       info->flags = FBINFO_DEFAULT;
+       if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
+               info->fbops = &psbfb_ops;
+       else if (gtt_roll) {    /* GTT rolling seems best */
+               info->fbops = &psbfb_roll_ops;
+               info->flags |= FBINFO_HWACCEL_YPAN;
+       } else  /* Software */
+               info->fbops = &psbfb_unaccel_ops;
+
+       ret = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (ret) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+
+       info->fix.smem_start = dev->mode_config.fb_base;
+       info->fix.smem_len = size;
+       info->fix.ywrapstep = gtt_roll;
+       info->fix.ypanstep = 0;
+
+       /* Accessed stolen memory directly */
+       info->screen_base = (char *)dev_priv->vram_addr +
+                                                       backing->offset;
+       info->screen_size = size;
+
+       if (dev_priv->gtt.stolen_size) {
+               info->apertures = alloc_apertures(1);
+               if (!info->apertures) {
+                       ret = -ENOMEM;
+                       goto out_unref;
+               }
+               info->apertures->ranges[0].base = dev->mode_config.fb_base;
+               info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
+       }
+
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+       drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
+                               sizes->fb_width, sizes->fb_height);
+
+       info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
+       info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+
+       info->pixmap.size = 64 * 1024;
+       info->pixmap.buf_align = 8;
+       info->pixmap.access_align = 32;
+       info->pixmap.flags = FB_PIXMAP_SYSTEM;
+       info->pixmap.scan_align = 1;
+
+       dev_info(dev->dev, "allocated %dx%d fb\n",
+                                       psbfb->base.width, psbfb->base.height);
+
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+out_unref:
+       if (backing->stolen)
+               psb_gtt_free_range(dev, backing);
+       else
+               drm_gem_object_unreference(&backing->gem);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+out_err1:
+       mutex_unlock(&dev->struct_mutex);
+       psb_gtt_free_range(dev, backing);
+       return ret;
+}
+
+/**
+ *     psb_user_framebuffer_create     -       create framebuffer
+ *     @dev: our DRM device
+ *     @filp: client file
+ *     @cmd: mode request
+ *
+ *     Create a new framebuffer backed by a userspace GEM object
+ */
+static struct drm_framebuffer *psb_user_framebuffer_create
+                       (struct drm_device *dev, struct drm_file *filp,
+                        struct drm_mode_fb_cmd2 *cmd)
+{
+       struct gtt_range *r;
+       struct drm_gem_object *obj;
+
+       /*
+        *      Find the GEM object and thus the gtt range object that is
+        *      to back this space
+        */
+       obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
+       if (obj == NULL)
+               return ERR_PTR(-ENOENT);
+
+       /* Let the core code do all the work */
+       r = container_of(obj, struct gtt_range, gem);
+       return psb_framebuffer_create(dev, cmd, r);
+}
+
+static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+                                                       u16 blue, int regno)
+{
+}
+
+static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
+                                       u16 *green, u16 *blue, int regno)
+{
+}
+
+static int psbfb_probe(struct drm_fb_helper *helper,
+                               struct drm_fb_helper_surface_size *sizes)
+{
+       struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
+       int new_fb = 0;
+       int ret;
+
+       if (!helper->fb) {
+               ret = psbfb_create(psb_fbdev, sizes);
+               if (ret)
+                       return ret;
+               new_fb = 1;
+       }
+       return new_fb;
+}
+
+struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+       .gamma_set = psbfb_gamma_set,
+       .gamma_get = psbfb_gamma_get,
+       .fb_probe = psbfb_probe,
+};
+
+int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
+{
+       struct fb_info *info;
+       struct psb_framebuffer *psbfb = &fbdev->pfb;
+
+       if (fbdev->psb_fb_helper.fbdev) {
+               info = fbdev->psb_fb_helper.fbdev;
+               unregister_framebuffer(info);
+               if (info->cmap.len)
+                       fb_dealloc_cmap(&info->cmap);
+               framebuffer_release(info);
+       }
+       drm_fb_helper_fini(&fbdev->psb_fb_helper);
+       drm_framebuffer_cleanup(&psbfb->base);
+
+       if (psbfb->gtt)
+               drm_gem_object_unreference(&psbfb->gtt->gem);
+       return 0;
+}
+
+int psb_fbdev_init(struct drm_device *dev)
+{
+       struct psb_fbdev *fbdev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
+       if (!fbdev) {
+               dev_err(dev->dev, "no memory\n");
+               return -ENOMEM;
+       }
+
+       dev_priv->fbdev = fbdev;
+       fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
+
+       drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
+                                                       INTELFB_CONN_LIMIT);
+
+       drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
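+       /* Bring up an initial console configuration at 32bpp */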
+       drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+       return 0;
+}
+
+void psb_fbdev_fini(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (!dev_priv->fbdev)
+               return;
+
+       psb_fbdev_destroy(dev, dev_priv->fbdev);
+       kfree(dev_priv->fbdev);
+       dev_priv->fbdev = NULL;
+}
+
+static void psbfb_output_poll_changed(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
+       drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
+}
+
+/**
+ *     psb_user_framebuffer_create_handle - add handle to a framebuffer
+ *     @fb: framebuffer
+ *     @file_priv: our DRM file
+ *     @handle: returned handle
+ *
+ *     Our framebuffer object is a GTT range which also contains a GEM
+ *     object. We need to turn it into a handle for userspace. GEM will do
+ *     the work for us
+ */
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+                                             struct drm_file *file_priv,
+                                             unsigned int *handle)
+{
+       struct psb_framebuffer *psbfb = to_psb_fb(fb);
+       struct gtt_range *r = psbfb->gtt;
+       return drm_gem_handle_create(file_priv, &r->gem, handle);
+}
+
+/**
+ *     psb_user_framebuffer_destroy    -       destruct user created fb
+ *     @fb: framebuffer
+ *
+ *     User framebuffers are backed by GEM objects so all we have to do is
+ *     clean up a bit and drop the reference, GEM will handle the fallout
+ */
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+       struct psb_framebuffer *psbfb = to_psb_fb(fb);
+       struct gtt_range *r = psbfb->gtt;
+       struct drm_device *dev = fb->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_fbdev *fbdev = dev_priv->fbdev;
+       struct drm_crtc *crtc;
+       int reset = 0;
+
+       /* Should never get stolen memory for a user fb */
+       WARN_ON(r->stolen);
+
+       /* Check if we are erroneously live */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               if (crtc->fb == fb)
+                       reset = 1;
+
+       if (reset)
+               /*
+                * Now force a sane response before we permit the DRM CRTC
+                * layer to do stupid things like blank the display. Instead
+                * we reset this framebuffer as if the user had forced a reset.
+                * We must do this before the cleanup so that the DRM layer
+                * doesn't get a chance to stick its oar in where it isn't
+                * wanted.
+                */
+               drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
+
+       /* Let DRM do its clean up */
+       drm_framebuffer_cleanup(fb);
+       /*  We are no longer using the resource in GEM */
+       drm_gem_object_unreference_unlocked(&r->gem);
+       kfree(fb);
+}
+
+static const struct drm_mode_config_funcs psb_mode_funcs = {
+       .fb_create = psb_user_framebuffer_create,
+       .output_poll_changed = psbfb_output_poll_changed,
+};
+
+static int psb_create_backlight_property(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_property *backlight;
+
+       if (dev_priv->backlight_property)
+               return 0;
+
+       backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                                       "backlight", 2);
+       backlight->values[0] = 0;
+       backlight->values[1] = 100;
+
+       dev_priv->backlight_property = backlight;
+
+       return 0;
+}
+
+static void psb_setup_outputs(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_connector *connector;
+
+       drm_mode_create_scaling_mode_property(dev);
+       psb_create_backlight_property(dev);
+
+       dev_priv->ops->output_init(dev);
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list,
+                           head) {
+               struct psb_intel_encoder *psb_intel_encoder =
+                       psb_intel_attached_encoder(connector);
+               struct drm_encoder *encoder = &psb_intel_encoder->base;
+               int crtc_mask = 0, clone_mask = 0;
+
+               /* valid crtcs */
+               switch (psb_intel_encoder->type) {
+               case INTEL_OUTPUT_ANALOG:
+                       crtc_mask = (1 << 0);
+                       clone_mask = (1 << INTEL_OUTPUT_ANALOG);
+                       break;
+               case INTEL_OUTPUT_SDVO:
+                       crtc_mask = ((1 << 0) | (1 << 1));
+                       clone_mask = (1 << INTEL_OUTPUT_SDVO);
+                       break;
+               case INTEL_OUTPUT_LVDS:
+                       if (IS_MRST(dev))
+                               crtc_mask = (1 << 0);
+                       else
+                               crtc_mask = (1 << 1);
+                       clone_mask = (1 << INTEL_OUTPUT_LVDS);
+                       break;
+               case INTEL_OUTPUT_MIPI:
+                       crtc_mask = (1 << 0);
+                       clone_mask = (1 << INTEL_OUTPUT_MIPI);
+                       break;
+               case INTEL_OUTPUT_MIPI2:
+                       crtc_mask = (1 << 2);
+                       clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+                       break;
+               case INTEL_OUTPUT_HDMI:
+                       if (IS_MFLD(dev))
+                               crtc_mask = (1 << 1);
+                       else    
+                               crtc_mask = (1 << 0);
+                       clone_mask = (1 << INTEL_OUTPUT_HDMI);
+                       break;
+               }
+               encoder->possible_crtcs = crtc_mask;
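+               /* Convert the clone type mask into a mask of encoder indices */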
+               encoder->possible_clones =
+                   psb_intel_connector_clones(dev, clone_mask);
+       }
+}
+
+void psb_modeset_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       int i;
+
+       drm_mode_config_init(dev);
+
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+
+       dev->mode_config.funcs = (void *) &psb_mode_funcs;
+
+       /* set memory base */
+       /* Oaktrail and Poulsbo should use BAR 2*/
+       pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
+                                       &(dev->mode_config.fb_base));
+
+       /* num pipes is 2 for PSB but 1 for Mrst */
+       for (i = 0; i < dev_priv->num_pipe; i++)
+               psb_intel_crtc_init(dev, i, mode_dev);
+
+       dev->mode_config.max_width = 2048;
+       dev->mode_config.max_height = 2048;
+
+       psb_setup_outputs(dev);
+}
+
+void psb_modeset_cleanup(struct drm_device *dev)
+{
+       mutex_lock(&dev->struct_mutex);
+
+       drm_kms_helper_poll_fini(dev);
+       psb_fbdev_fini(dev);
+       drm_mode_config_cleanup(dev);
+
+       mutex_unlock(&dev->struct_mutex);
+}
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
new file mode 100644 (file)
index 0000000..989558a
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2008-2011, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *      Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _FRAMEBUFFER_H_
+#define _FRAMEBUFFER_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+
+#include "psb_drv.h"
+
+struct psb_framebuffer {
+       struct drm_framebuffer base;
+       struct address_space *addr_space;
+       struct fb_info *fbdev;
+       struct gtt_range *gtt;
+};
+
+struct psb_fbdev {
+       struct drm_fb_helper psb_fb_helper;
+       struct psb_framebuffer pfb;
+};
+
+#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
+
+extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
+
+#endif
+
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
new file mode 100644 (file)
index 0000000..9fbb868
--- /dev/null
@@ -0,0 +1,292 @@
+/*
+ *  psb GEM interface
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Alan Cox
+ *
+ * TODO:
+ *     -       we need to work out if the MMU is relevant (eg for
+ *             accelerated operations on a GEM object)
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+
+int psb_gem_init_object(struct drm_gem_object *obj)
+{
+       return -EINVAL;
+}
+
+void psb_gem_free_object(struct drm_gem_object *obj)
+{
+       struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
+       drm_gem_object_release_wrap(obj);
+       /* This must occur last as it frees up the memory of the GEM object */
+       psb_gtt_free_range(obj->dev, gtt);
+}
+
+int psb_gem_get_aperture(struct drm_device *dev, void *data,
+                               struct drm_file *file)
+{
+       return -EINVAL;
+}
+
+/**
+ *     psb_gem_dumb_map_gtt    -       buffer mapping for dumb interface
+ *     @file: our drm client file
+ *     @dev: drm device
+ *     @handle: GEM handle to the object (from dumb_create)
+ *     @offset: returned mmap offset of the object
+ *
+ *     Do the necessary setup to allow the mapping of the frame buffer
+ *     into user memory. We don't have to do much here at the moment.
+ */
+int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+                        uint32_t handle, uint64_t *offset)
+{
+       int ret = 0;
+       struct drm_gem_object *obj;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       mutex_lock(&dev->struct_mutex);
+
+       /* GEM does all our handle to object mapping */
+       obj = drm_gem_object_lookup(dev, file, handle);
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
+       /* What validation is needed here ? */
+
+       /* Make it mmapable */
+       if (!obj->map_list.map) {
+               ret = gem_create_mmap_offset(obj);
+               if (ret)
+                       goto out;
+       }
+       /* GEM should really work out the hash offsets for us */
+       *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+out:
+       drm_gem_object_unreference(obj);
+unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+/**
+ *     psb_gem_create          -       create a mappable object
+ *     @file: the DRM file of the client
+ *     @dev: our device
+ *     @size: the size requested
+ *     @handlep: returned handle (opaque number)
+ *
+ *     Create a GEM object, fill in the boilerplate and attach a handle to
+ *     it so that userspace can speak about it. This does the core work
+ *     for the various methods that do/will create GEM objects for things
+ */
+static int psb_gem_create(struct drm_file *file,
+       struct drm_device *dev, uint64_t size, uint32_t *handlep)
+{
+       struct gtt_range *r;
+       int ret;
+       u32 handle;
+
+       size = roundup(size, PAGE_SIZE);
+
+       /* Allocate our object - for now a direct gtt range which is not
+          stolen memory backed */
+       r = psb_gtt_alloc_range(dev, size, "gem", 0);
+       if (r == NULL) {
+               dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
+               return -ENOSPC;
+       }
+       /* Initialize the extra goodies GEM needs to do all the hard work */
+       if (drm_gem_object_init(dev, &r->gem, size) != 0) {
+               psb_gtt_free_range(dev, r);
+               /* GEM doesn't give an error code so use -ENOMEM */
+               dev_err(dev->dev, "GEM init failed for %lld\n", size);
+               return -ENOMEM;
+       }
+       /* Give the object a handle so we can carry it more easily */
+       ret = drm_gem_handle_create(file, &r->gem, &handle);
+       if (ret) {
+               dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
+                                                       &r->gem, size);
+               drm_gem_object_release(&r->gem);
+               psb_gtt_free_range(dev, r);
+               return ret;
+       }
+       /* We have the initial and handle reference but need only one now */
+       drm_gem_object_unreference(&r->gem);
+       *handlep = handle;
+       return 0;
+}
+
+/**
+ *     psb_gem_dumb_create     -       create a dumb buffer
+ *     @file: our client file
+ *     @dev: our device
+ *     @args: the requested arguments copied from userspace
+ *
+ *     Allocate a buffer suitable for use for a frame buffer of the
+ *     form described by user space. Give userspace a handle by which
+ *     to reference it.
+ */
+int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+                       struct drm_mode_create_dumb *args)
+{
+       args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+       args->size = args->pitch * args->height;
+       return psb_gem_create(file, dev, args->size, &args->handle);
+}
+
+/**
+ *     psb_gem_dumb_destroy    -       destroy a dumb buffer
+ *     @file: client file
+ *     @dev: our DRM device
+ *     @handle: the object handle
+ *
+ *     Destroy a handle that was created via psb_gem_dumb_create, at least
+ *     we hope it was created that way. i915 seems to assume the caller
+ *     does the checking, but that might be worth reviewing. FIXME
+ */
+int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+                       uint32_t handle)
+{
+       /* No special work needed, drop the reference and see what falls out */
+       return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ *     psb_gem_fault           -       pagefault handler for GEM objects
+ *     @vma: the VMA of the GEM object
+ *     @vmf: fault detail
+ *
+ *     Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+ *     does most of the work for us including the actual map/unmap calls
+ *     but we need to do the actual page work.
+ *
+ *     This code eventually needs to handle faulting objects in and out
+ *     of the GTT and repacking it when we run out of space. We can put
+ *     that off for now and for our simple uses
+ *
+ *     The VMA was set up by GEM. In doing so it also ensured that the
+ *     vma->vm_private_data points to the GEM object that is backing this
+ *     mapping.
+ */
+int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_gem_object *obj;
+       struct gtt_range *r;
+       int ret;
+       unsigned long pfn;
+       pgoff_t page_offset;
+       struct drm_device *dev;
+       struct drm_psb_private *dev_priv;
+
+       obj = vma->vm_private_data;     /* GEM object */
+       dev = obj->dev;
+       dev_priv = dev->dev_private;
+
+       r = container_of(obj, struct gtt_range, gem);   /* Get the gtt range */
+
+       /* Make sure we don't parallel update on a fault, nor move or remove
+          something from beneath our feet */
+       mutex_lock(&dev->struct_mutex);
+
+       /* For now the mmap pins the object and it stays pinned. As things
+          stand that will do us no harm */
+       if (r->mmapping == 0) {
+               ret = psb_gtt_pin(r);
+               if (ret < 0) {
+                       dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
+                       goto fail;
+               }
+               r->mmapping = 1;
+       }
+
+       /* Page relative to the VMA start - we must calculate this ourselves
+          because vmf->pgoff is the fake GEM offset */
+       page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
+                               >> PAGE_SHIFT;
+
+       /* CPU view of the page, don't go via the GART for CPU writes */
+       if (r->stolen)
+               pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
+       else
+               pfn = page_to_pfn(r->pages[page_offset]);
+       ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+fail:
+       mutex_unlock(&dev->struct_mutex);
+       switch (ret) {
+       case 0:
+       case -ERESTARTSYS:
+       case -EINTR:
+               return VM_FAULT_NOPAGE;
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       default:
+               return VM_FAULT_SIGBUS;
+       }
+}
+
+static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
+                                               int size, u32 *handle)
+{
+       struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
+       if (gtt == NULL)
+               return -ENOMEM;
+       if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
+               goto free_gtt;
+       if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
+               return 0;
+free_gtt:
+       psb_gtt_free_range(dev, gtt);
+       return -ENOMEM;
+}
+
+/*
+ *     GEM interfaces for our specific client
+ */
+int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+                                       struct drm_file *file)
+{
+       struct drm_psb_gem_create *args = data;
+       int ret;
+       if (args->flags & GMA_GEM_CREATE_STOLEN) {
+               ret = psb_gem_create_stolen(file, dev, args->size,
+                                                       &args->handle);
+               if (ret == 0)
+                       return 0;
+               /* Fall through */
+               args->flags &= ~GMA_GEM_CREATE_STOLEN;
+       }
+       return psb_gem_create(file, dev, args->size, &args->handle);
+}
+
+int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+                                       struct drm_file *file)
+{
+       struct drm_psb_gem_mmap *args = data;
+       return dev->driver->dumb_map_offset(file, dev,
+                                               args->handle, &args->offset);
+}
+
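
psb_gem_dumb_create() above derives the line pitch by rounding width * bytes-per-pixel up to a 64-byte boundary, and the object size from pitch * height. A small standalone sketch of that arithmetic (plain userspace C, illustrative numbers only):

    #include <stdint.h>
    #include <stdio.h>

    /* Round x up to the next multiple of a (a must be a power of two),
       mirroring the kernel's ALIGN() as used by psb_gem_dumb_create(). */
    static uint32_t align_up(uint32_t x, uint32_t a)
    {
            return (x + a - 1) & ~(a - 1);
    }

    int main(void)
    {
            uint32_t width = 1024, height = 768, bpp = 32;

            /* Bytes per pixel, rounded up, then the pitch aligned to 64 bytes */
            uint32_t pitch = align_up(width * ((bpp + 7) / 8), 64);
            uint64_t size  = (uint64_t)pitch * height;

            printf("pitch=%u bytes, size=%llu bytes\n",
                   pitch, (unsigned long long)size);
            return 0;
    }
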
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
new file mode 100644 (file)
index 0000000..daac121
--- /dev/null
@@ -0,0 +1,89 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+void drm_gem_object_release_wrap(struct drm_gem_object *obj)
+{
+       /* Remove the list map if one is present */
+       if (obj->map_list.map) {
+               struct drm_gem_mm *mm = obj->dev->mm_private;
+               struct drm_map_list *list = &obj->map_list;
+               drm_ht_remove_item(&mm->offset_hash, &list->hash);
+               drm_mm_put_block(list->file_offset_node);
+               kfree(list->map);
+               list->map = NULL;
+       }
+       drm_gem_object_release(obj);
+}
+
+/**
+ *     gem_create_mmap_offset          -       invent an mmap offset
+ *     @obj: our object
+ *
+ *     Standard implementation of offset generation for mmap as is
+ *     duplicated in several drivers. This belongs in GEM.
+ */
+int gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_gem_mm *mm = dev->mm_private;
+       struct drm_map_list *list;
+       struct drm_local_map *map;
+       int ret;
+
+       list = &obj->map_list;
+       list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+       if (list->map == NULL)
+               return -ENOMEM;
+       map = list->map;
+       map->type = _DRM_GEM;
+       map->size = obj->size;
+       map->handle = obj;
+
+       list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+                                       obj->size / PAGE_SIZE, 0, 0);
+       if (!list->file_offset_node) {
+               dev_err(dev->dev, "failed to allocate offset for bo %d\n",
+                                                               obj->name);
+               ret = -ENOSPC;
+               goto free_it;
+       }
+       list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+                                       obj->size / PAGE_SIZE, 0);
+       if (!list->file_offset_node) {
+               ret = -ENOMEM;
+               goto free_it;
+       }
+       list->hash.key = list->file_offset_node->start;
+       ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+       if (ret) {
+               dev_err(dev->dev, "failed to add to map hash\n");
+               goto free_mm;
+       }
+       return 0;
+
+free_mm:
+       drm_mm_put_block(list->file_offset_node);
+free_it:
+       kfree(list->map);
+       list->map = NULL;
+       return ret;
+}
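
The fake offset produced here (the hash key shifted left by PAGE_SHIFT) is what gem.c hands back through psb_gem_dumb_map_gtt(), and userspace passes it straight to mmap() on the DRM fd. A rough userspace sketch of that round trip using the generic dumb-buffer ioctls; the device node path and the <drm/...> include locations are assumptions, and error handling is minimal:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <drm/drm.h>
    #include <drm/drm_mode.h>

    int main(void)
    {
            int fd = open("/dev/dri/card0", O_RDWR);
            if (fd < 0)
                    return 1;

            /* Ask the driver for a dumb buffer; it fills in handle/pitch/size */
            struct drm_mode_create_dumb create = {
                    .width = 1024, .height = 768, .bpp = 32,
            };
            if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                    goto out;

            /* Fetch the fake offset built by gem_create_mmap_offset() */
            struct drm_mode_map_dumb map = { .handle = create.handle };
            if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                    goto out;

            /* mmap the DRM fd at that offset; faults land in psb_gem_fault() */
            void *fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, map.offset);
            if (fb != MAP_FAILED) {
                    memset(fb, 0, create.size);
                    munmap(fb, create.size);
            }
    out:
            close(fd);      /* closing the fd also releases the GEM handle */
            return 0;
    }
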
diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
new file mode 100644 (file)
index 0000000..ce5ce30
--- /dev/null
@@ -0,0 +1,2 @@
+extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
+extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
new file mode 100644 (file)
index 0000000..e770bd1
--- /dev/null
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ *         Alan Cox <alan@linux.intel.com>
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+
+
+/*
+ *     GTT resource allocator - manage page mappings in GTT space
+ */
+
+/**
+ *     psb_gtt_mask_pte        -       generate GTT pte entry
+ *     @pfn: page number to encode
+ *     @type: type of memory in the GTT
+ *
+ *     Set the GTT entry for the appropriate memory type.
+ */
+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
+{
+       uint32_t mask = PSB_PTE_VALID;
+
+       if (type & PSB_MMU_CACHED_MEMORY)
+               mask |= PSB_PTE_CACHED;
+       if (type & PSB_MMU_RO_MEMORY)
+               mask |= PSB_PTE_RO;
+       if (type & PSB_MMU_WO_MEMORY)
+               mask |= PSB_PTE_WO;
+
+       return (pfn << PAGE_SHIFT) | mask;
+}
+
+/**
+ *     psb_gtt_entry           -       find the GTT entries for a gtt_range
+ *     @dev: our DRM device
+ *     @r: our GTT range
+ *
+ *     Given a gtt_range object return the GTT offset of the page table
+ *     entries for this gtt_range
+ */
+u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long offset;
+
+       offset = r->resource.start - dev_priv->gtt_mem->start;
+
+       return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
+}
+
+/**
+ *     psb_gtt_insert  -       put an object into the GTT
+ *     @dev: our DRM device
+ *     @r: our GTT range
+ *
+ *     Take our preallocated GTT range and insert the GEM object into
+ *     the GTT. This is protected via the gtt mutex which the caller
+ *     must hold.
+ */
+static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
+{
+       u32 *gtt_slot, pte;
+       struct page **pages;
+       int i;
+
+       if (r->pages == NULL) {
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       WARN_ON(r->stolen);     /* refcount these maybe ? */
+
+       gtt_slot = psb_gtt_entry(dev, r);
+       pages = r->pages;
+
+       /* Make sure changes are visible to the GPU */
+       set_pages_array_uc(pages, r->npage);
+
+       /* Write our page entries into the GTT itself */
+       for (i = r->roll; i < r->npage; i++) {
+               pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+               iowrite32(pte, gtt_slot++);
+       }
+       for (i = 0; i < r->roll; i++) {
+               pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+               iowrite32(pte, gtt_slot++);
+       }
+       /* Make sure all the entries are set before we return */
+       ioread32(gtt_slot - 1);
+
+       return 0;
+}
+
+/**
+ *     psb_gtt_remove  -       remove an object from the GTT
+ *     @dev: our DRM device
+ *     @r: our GTT range
+ *
+ *     Remove a preallocated GTT range from the GTT. Overwrite all the
+ *     page table entries with the dummy page. This is protected via the gtt
+ *     mutex which the caller must hold.
+ */
+static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 *gtt_slot, pte;
+       int i;
+
+       WARN_ON(r->stolen);
+
+       gtt_slot = psb_gtt_entry(dev, r);
+       pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
+
+       for (i = 0; i < r->npage; i++)
+               iowrite32(pte, gtt_slot++);
+       ioread32(gtt_slot - 1);
+       set_pages_array_wb(r->pages, r->npage);
+}
+
+/**
+ *     psb_gtt_roll    -       set scrolling position
+ *     @dev: our DRM device
+ *     @r: the gtt mapping we are using
+ *     @roll: roll offset
+ *
+ *     Roll an existing pinned mapping by moving the pages through the GTT.
+ *     This allows us to implement hardware scrolling on the consoles without
+ *     a 2D engine
+ */
+void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
+{
+       u32 *gtt_slot, pte;
+       int i;
+
+       if (roll >= r->npage) {
+               WARN_ON(1);
+               return;
+       }
+
+       r->roll = roll;
+
+       /* Not currently in the GTT - no worry we will write the mapping at
+          the right position when it gets pinned */
+       if (!r->stolen && !r->in_gart)
+               return;
+
+       gtt_slot = psb_gtt_entry(dev, r);
+
+       for (i = r->roll; i < r->npage; i++) {
+               pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+               iowrite32(pte, gtt_slot++);
+       }
+       for (i = 0; i < r->roll; i++) {
+               pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+               iowrite32(pte, gtt_slot++);
+       }
+       ioread32(gtt_slot - 1);
+}
+
+/**
+ *     psb_gtt_attach_pages    -       attach and pin GEM pages
+ *     @gt: the gtt range
+ *
+ *     Pin and build an in kernel list of the pages that back our GEM object.
+ *     While we hold this the pages cannot be swapped out. This is protected
+ *     via the gtt mutex which the caller must hold.
+ */
+static int psb_gtt_attach_pages(struct gtt_range *gt)
+{
+       struct inode *inode;
+       struct address_space *mapping;
+       int i;
+       struct page *p;
+       int pages = gt->gem.size / PAGE_SIZE;
+
+       WARN_ON(gt->pages);
+
+       /* This is the shared memory object that backs the GEM resource */
+       inode = gt->gem.filp->f_path.dentry->d_inode;
+       mapping = inode->i_mapping;
+
+       gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
+       if (gt->pages == NULL)
+               return -ENOMEM;
+       gt->npage = pages;
+
+       for (i = 0; i < pages; i++) {
+               /* FIXME: needs updating as per mail from Hugh Dickins */
+               p = read_cache_page_gfp(mapping, i,
+                                       __GFP_COLD | GFP_KERNEL);
+               if (IS_ERR(p))
+                       goto err;
+               gt->pages[i] = p;
+       }
+       return 0;
+
+err:
+       while (i--)
+               page_cache_release(gt->pages[i]);
+       kfree(gt->pages);
+       gt->pages = NULL;
+       return PTR_ERR(p);
+}
+
+/**
+ *     psb_gtt_detach_pages    -       detach and unpin GEM pages
+ *     @gt: the gtt range
+ *
+ *     Undo the effect of psb_gtt_attach_pages. At this point the pages
+ *     must have been removed from the GTT as they could now be paged out
+ *     and change bus addresses. This is protected via the gtt mutex which the
+ *     caller must hold.
+ */
+static void psb_gtt_detach_pages(struct gtt_range *gt)
+{
+       int i;
+       for (i = 0; i < gt->npage; i++) {
+               /* FIXME: do we need to force dirty */
+               set_page_dirty(gt->pages[i]);
+               page_cache_release(gt->pages[i]);
+       }
+       kfree(gt->pages);
+       gt->pages = NULL;
+}
+
+/**
+ *     psb_gtt_pin             -       pin pages into the GTT
+ *     @gt: range to pin
+ *
+ *     Pin a set of pages into the GTT. The pins are refcounted so that
+ *     multiple pins need multiple unpins to undo.
+ *
+ *     Non GEM backed objects treat this as a no-op as they are always GTT
+ *     backed objects.
+ */
+int psb_gtt_pin(struct gtt_range *gt)
+{
+       int ret = 0;
+       struct drm_device *dev = gt->gem.dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev_priv->gtt_mutex);
+
+       if (gt->in_gart == 0 && gt->stolen == 0) {
+               ret = psb_gtt_attach_pages(gt);
+               if (ret < 0)
+                       goto out;
+               ret = psb_gtt_insert(dev, gt);
+               if (ret < 0) {
+                       psb_gtt_detach_pages(gt);
+                       goto out;
+               }
+       }
+       gt->in_gart++;
+out:
+       mutex_unlock(&dev_priv->gtt_mutex);
+       return ret;
+}
+
+/**
+ *     psb_gtt_unpin           -       Drop a GTT pin requirement
+ *     @gt: range to pin
+ *
+ *     Undoes the effect of psb_gtt_pin. On the last drop the GEM object
+ *     will be removed from the GTT which will also drop the page references
+ *     and allow the VM to reclaim or page out the backing pages.
+ *
+ *     Non GEM backed objects treat this as a no-op as they are always GTT
+ *     backed objects.
+ */
+void psb_gtt_unpin(struct gtt_range *gt)
+{
+       struct drm_device *dev = gt->gem.dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev_priv->gtt_mutex);
+
+       WARN_ON(!gt->in_gart);
+
+       gt->in_gart--;
+       if (gt->in_gart == 0 && gt->stolen == 0) {
+               psb_gtt_remove(dev, gt);
+               psb_gtt_detach_pages(gt);
+       }
+       mutex_unlock(&dev_priv->gtt_mutex);
+}
+
+/*
+ *     GTT resource allocator - allocate and manage GTT address space
+ */
+
+/**
+ *     psb_gtt_alloc_range     -       allocate GTT address space
+ *     @dev: Our DRM device
+ *     @len: length (bytes) of address space required
+ *     @name: resource name
+ *     @backed: resource should be backed by stolen pages
+ *
+ *     Ask the kernel core to find us a suitable range of addresses
+ *     to use for a GTT mapping.
+ *
+ *     Returns a gtt_range structure describing the object, or NULL on
+ *     error. On successful return the resource is both allocated and marked
+ *     as in use.
+ */
+struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+                                               const char *name, int backed)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct gtt_range *gt;
+       struct resource *r = dev_priv->gtt_mem;
+       int ret;
+       unsigned long start, end;
+
+       if (backed) {
+               /* The start of the GTT is the stolen pages */
+               start = r->start;
+               end = r->start + dev_priv->gtt.stolen_size - 1;
+       } else {
+               /* The rest we will use for GEM backed objects */
+               start = r->start + dev_priv->gtt.stolen_size;
+               end = r->end;
+       }
+
+       gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
+       if (gt == NULL)
+               return NULL;
+       gt->resource.name = name;
+       gt->stolen = backed;
+       gt->in_gart = backed;
+       gt->roll = 0;
+       /* Ensure this is set for non GEM objects */
+       gt->gem.dev = dev;
+       ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
+                               len, start, end, PAGE_SIZE, NULL, NULL);
+       if (ret == 0) {
+               gt->offset = gt->resource.start - r->start;
+               return gt;
+       }
+       kfree(gt);
+       return NULL;
+}
+
+/**
+ *     psb_gtt_free_range      -       release GTT address space
+ *     @dev: our DRM device
+ *     @gt: a mapping created with psb_gtt_alloc_range
+ *
+ *     Release a resource that was allocated with psb_gtt_alloc_range. If the
+ *     object has been pinned by mmap users we clean this up here currently.
+ */
+void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
+{
+       /* Undo the mmap pin if we are destroying the object */
+       if (gt->mmapping) {
+               psb_gtt_unpin(gt);
+               gt->mmapping = 0;
+       }
+       WARN_ON(gt->in_gart && !gt->stolen);
+       release_resource(&gt->resource);
+       kfree(gt);
+}
+
+void psb_gtt_alloc(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       init_rwsem(&dev_priv->gtt.sem);
+}
+
+void psb_gtt_takedown(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->gtt_map) {
+               iounmap(dev_priv->gtt_map);
+               dev_priv->gtt_map = NULL;
+       }
+       if (dev_priv->gtt_initialized) {
+               pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+                                     dev_priv->gmch_ctrl);
+               PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
+               (void) PSB_RVDC32(PSB_PGETBL_CTL);
+       }
+       if (dev_priv->vram_addr)
+               iounmap(dev_priv->vram_addr);
+}
+
+int psb_gtt_init(struct drm_device *dev, int resume)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned gtt_pages;
+       unsigned long stolen_size, vram_stolen_size;
+       unsigned i, num_pages;
+       unsigned pfn_base;
+       uint32_t vram_pages;
+       uint32_t dvmt_mode = 0;
+       struct psb_gtt *pg;
+
+       int ret = 0;
+       uint32_t pte;
+
+       mutex_init(&dev_priv->gtt_mutex);
+
+       psb_gtt_alloc(dev);
+       pg = &dev_priv->gtt;
+
+       /* Enable the GTT */
+       pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
+       pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+                             dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+       dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
+       PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+       (void) PSB_RVDC32(PSB_PGETBL_CTL);
+
+       /* The root resource we allocate address space from */
+       dev_priv->gtt_initialized = 1;
+
+       pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
+
+       /*
+        *      The video MMU has a hardware bug when accessing 0xD0000000,
+        *      so make the GATT start at 0xE0000000. This doesn't actually
+        *      matter for us but may do if the video acceleration ever
+        *      gets opened up.
+        */
+       pg->mmu_gatt_start = 0xE0000000;
+
+       pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
+       gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
+                                                               >> PAGE_SHIFT;
+       /* Some CDV firmware doesn't report this currently, in which case
+          the system has 64 GTT pages */
+       if (pg->gtt_start == 0 || gtt_pages == 0) {
+               dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
+               gtt_pages = 64;
+               pg->gtt_start = dev_priv->pge_ctl;
+       }
+
+       pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
+       pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+                                                               >> PAGE_SHIFT;
+       dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
+
+       if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
+               static struct resource fudge;   /* Preferably peppermint */
+               /* This can occur on CDV SDV systems. Fudge it in this case.
+                  We really don't care what imaginary space is being allocated
+                  at this point */
+               dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
+               pg->gatt_start = 0x40000000;
+               pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
+               /* This is a little confusing but in fact the GTT is providing
+                  a view from the GPU into memory and not vice versa. As such
+                  this is really allocating space that is not the same as the
+                  CPU address space on CDV */
+               fudge.start = 0x40000000;
+               fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
+               fudge.name = "fudge";
+               fudge.flags = IORESOURCE_MEM;
+               dev_priv->gtt_mem = &fudge;
+       }
+
+       pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
+       vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
+                                                               - PAGE_SIZE;
+
+       stolen_size = vram_stolen_size;
+
+       printk(KERN_INFO "Stolen memory information\n");
+       printk(KERN_INFO "       base in RAM: 0x%x\n", dev_priv->stolen_base);
+       printk(KERN_INFO "       size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
+               vram_stolen_size/1024);
+       dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
+       printk(KERN_INFO "      the correct size should be: %dM(dvmt mode=%d)\n",
+               (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
+
+       if (resume && (gtt_pages != pg->gtt_pages) &&
+           (stolen_size != pg->stolen_size)) {
+               dev_err(dev->dev, "GTT resume error.\n");
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       pg->gtt_pages = gtt_pages;
+       pg->stolen_size = stolen_size;
+       dev_priv->vram_stolen_size = vram_stolen_size;
+
+       /*
+        *      Map the GTT and the stolen memory area
+        */
+       dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+                                               gtt_pages << PAGE_SHIFT);
+       if (!dev_priv->gtt_map) {
+               dev_err(dev->dev, "Failure to map gtt.\n");
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
+       if (!dev_priv->vram_addr) {
+               dev_err(dev->dev, "Failure to map stolen base.\n");
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       /*
+        * Insert vram stolen pages into the GTT
+        */
+
+       pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
+       vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
+       printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+               num_pages, pfn_base << PAGE_SHIFT, 0);
+       for (i = 0; i < num_pages; ++i) {
+               pte = psb_gtt_mask_pte(pfn_base + i, 0);
+               iowrite32(pte, dev_priv->gtt_map + i);
+       }
+
+       /*
+        * Init rest of GTT to the scratch page to avoid accidents or scribbles
+        */
+
+       pfn_base = page_to_pfn(dev_priv->scratch_page);
+       pte = psb_gtt_mask_pte(pfn_base, 0);
+       for (; i < gtt_pages; ++i)
+               iowrite32(pte, dev_priv->gtt_map + i);
+
+       (void) ioread32(dev_priv->gtt_map + i - 1);
+       return 0;
+
+out_err:
+       psb_gtt_takedown(dev);
+       return ret;
+}
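
psb_gtt_insert() and psb_gtt_roll() both program consecutive GTT slots starting at pages[roll] and wrapping around to pages[0..roll), which is what lets the fbdev console scroll without a 2D engine. A standalone sketch that only prints the resulting write order (page counts are illustrative):

    #include <stdio.h>

    /* Illustrative only: show the order in which psb_gtt_insert()/psb_gtt_roll()
       write page indices into consecutive GTT slots for a given roll offset. */
    static void show_roll(int npage, int roll)
    {
            int i, slot = 0;

            printf("roll=%d:", roll);
            for (i = roll; i < npage; i++)
                    printf(" slot%d<-page%d", slot++, i);
            for (i = 0; i < roll; i++)
                    printf(" slot%d<-page%d", slot++, i);
            printf("\n");
    }

    int main(void)
    {
            /* With 8 pages and roll=3 the GTT view starts at page 3 and wraps,
               which is how the console appears to scroll. */
            show_roll(8, 0);
            show_roll(8, 3);
            return 0;
    }
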
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
new file mode 100644 (file)
index 0000000..aa17423
--- /dev/null
@@ -0,0 +1,64 @@
+/**************************************************************************
+ * Copyright (c) 2007-2008, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_GTT_H_
+#define _PSB_GTT_H_
+
+#include <drm/drmP.h>
+
+/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
+struct psb_gtt {
+       uint32_t gatt_start;
+       uint32_t mmu_gatt_start;
+       uint32_t gtt_start;
+       uint32_t gtt_phys_start;
+       unsigned gtt_pages;
+       unsigned gatt_pages;
+       unsigned long stolen_size;
+       unsigned long vram_stolen_size;
+       struct rw_semaphore sem;
+};
+
+/* Exported functions */
+extern int psb_gtt_init(struct drm_device *dev, int resume);
+extern void psb_gtt_takedown(struct drm_device *dev);
+
+/* Each gtt_range describes an allocation in the GTT area */
+struct gtt_range {
+       struct resource resource;       /* Resource for our allocation */
+       u32 offset;                     /* GTT offset of our object */
+       struct drm_gem_object gem;      /* GEM high level stuff */
+       int in_gart;                    /* Currently in the GART (ref ct) */
+       bool stolen;                    /* Backed from stolen RAM */
+       bool mmapping;                  /* Is mmappable */
+       struct page **pages;            /* Backing pages if present */
+       int npage;                      /* Number of backing pages */
+       int roll;                       /* Roll applied to the GTT entries */
+};
+
+extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+                                               const char *name, int backed);
+extern void psb_gtt_kref_put(struct gtt_range *gt);
+extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
+extern int psb_gtt_pin(struct gtt_range *gt);
+extern void psb_gtt_unpin(struct gtt_range *gt);
+extern void psb_gtt_roll(struct drm_device *dev,
+                                       struct gtt_range *gt, int roll);
+
+#endif
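
psb_gtt_alloc_range() (declared above) carves stolen-backed ranges out of the front of the aperture, up to stolen_size, and GEM-backed ranges out of the remainder. A standalone sketch of that split with made-up sizes; the real values come from the PCI BARs and the BSM register:

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative stand-in numbers, not real hardware values */
            unsigned long gatt_start  = 0x40000000UL;           /* aperture base */
            unsigned long gatt_size   = 128UL * 1024 * 1024;    /* aperture size */
            unsigned long stolen_size = 32UL * 1024 * 1024;     /* example only  */

            printf("stolen-backed range: 0x%lx - 0x%lx\n",
                   gatt_start, gatt_start + stolen_size - 1);
            printf("GEM-backed range:    0x%lx - 0x%lx\n",
                   gatt_start + stolen_size, gatt_start + gatt_size - 1);
            return 0;
    }
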
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
new file mode 100644 (file)
index 0000000..d4d0c5b
--- /dev/null
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2006 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+
+
+static void *find_section(struct bdb_header *bdb, int section_id)
+{
+       u8 *base = (u8 *)bdb;
+       int index = 0;
+       u16 total, current_size;
+       u8 current_id;
+
+       /* skip to first section */
+       index += bdb->header_size;
+       total = bdb->bdb_size;
+
+       /* walk the sections looking for section_id */
+       while (index < total) {
+               current_id = *(base + index);
+               index++;
+               current_size = *((u16 *)(base + index));
+               index += 2;
+               if (current_id == section_id)
+                       return base + index;
+               index += current_size;
+       }
+
+       return NULL;
+}
+
+static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+                       struct lvds_dvo_timing *dvo_timing)
+{
+       panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+               dvo_timing->hactive_lo;
+       panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+               ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+       panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+               dvo_timing->hsync_pulse_width;
+       panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+               ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+       panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+               dvo_timing->vactive_lo;
+       panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+               dvo_timing->vsync_off;
+       panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+               dvo_timing->vsync_pulse_width;
+       panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+               ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+       panel_fixed_mode->clock = dvo_timing->clock * 10;
+       panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+       /* Some VBTs have bogus h/vtotal values */
+       if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+               panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+       if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+               panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+       drm_mode_set_name(panel_fixed_mode);
+}
+
+static void parse_backlight_data(struct drm_psb_private *dev_priv,
+                               struct bdb_header *bdb)
+{
+       struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
+       struct bdb_lvds_backlight *lvds_bl;
+       u8 p_type = 0;
+       void *bl_start = NULL;
+       struct bdb_lvds_options *lvds_opts
+                               = find_section(bdb, BDB_LVDS_OPTIONS);
+
+       dev_priv->lvds_bl = NULL;
+
+       if (lvds_opts)
+               p_type = lvds_opts->panel_type;
+       else
+               return;
+
+       bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
+       vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
+
+       lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
+       if (!lvds_bl) {
+               dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
+               return;
+       }
+       memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
+       dev_priv->lvds_bl = lvds_bl;
+}
+
+/* Try to find integrated panel data */
+static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
+                           struct bdb_header *bdb)
+{
+       struct bdb_lvds_options *lvds_options;
+       struct bdb_lvds_lfp_data *lvds_lfp_data;
+       struct bdb_lvds_lfp_data_entry *entry;
+       struct lvds_dvo_timing *dvo_timing;
+       struct drm_display_mode *panel_fixed_mode;
+
+       /* Defaults if we can't find VBT info */
+       dev_priv->lvds_dither = 0;
+       dev_priv->lvds_vbt = 0;
+
+       lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+       if (!lvds_options)
+               return;
+
+       dev_priv->lvds_dither = lvds_options->pixel_dither;
+       if (lvds_options->panel_type == 0xff)
+               return;
+
+       lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+       if (!lvds_lfp_data)
+               return;
+
+
+       entry = &lvds_lfp_data->data[lvds_options->panel_type];
+       dvo_timing = &entry->dvo_timing;
+
+       panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
+                                     GFP_KERNEL);
+       if (panel_fixed_mode == NULL) {
+               dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
+               return;
+       }
+
+       dev_priv->lvds_vbt = 1;
+       fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+
+       if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
+               dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+               drm_mode_debug_printmodeline(panel_fixed_mode);
+       } else {
+               dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
+               dev_priv->lvds_vbt = 0;
+               kfree(panel_fixed_mode);
+       }
+       return;
+}
+
+/* Try to find sdvo panel data */
+static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
+                     struct bdb_header *bdb)
+{
+       struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+       struct lvds_dvo_timing *dvo_timing;
+       struct drm_display_mode *panel_fixed_mode;
+
+       dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+       sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+       if (!sdvo_lvds_options)
+               return;
+
+       dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+       if (!dvo_timing)
+               return;
+
+       panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+
+       if (!panel_fixed_mode)
+               return;
+
+       fill_detail_timing_data(panel_fixed_mode,
+                       dvo_timing + sdvo_lvds_options->panel_type);
+
+       dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+
+       return;
+}
+
+static void parse_general_features(struct drm_psb_private *dev_priv,
+                      struct bdb_header *bdb)
+{
+       struct bdb_general_features *general;
+
+       /* Set sensible defaults in case we can't find the general block */
+       dev_priv->int_tv_support = 1;
+       dev_priv->int_crt_support = 1;
+
+       general = find_section(bdb, BDB_GENERAL_FEATURES);
+       if (general) {
+               dev_priv->int_tv_support = general->int_tv_support;
+               dev_priv->int_crt_support = general->int_crt_support;
+               dev_priv->lvds_use_ssc = general->enable_ssc;
+
+               if (dev_priv->lvds_use_ssc) {
+                       dev_priv->lvds_ssc_freq
+                               = general->ssc_freq ? 100 : 96;
+               }
+       }
+}
+
+/**
+ * psb_intel_init_bios - initialize VBIOS settings & find VBT
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
+ * to appropriate values.
+ *
+ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
+ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
+ * feed an updated VBT back through that, compared to what we'll fetch using
+ * this method of groping around in the BIOS data.
+ *
+ * Returns 0 on success, nonzero on failure.
+ */
+int psb_intel_init_bios(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct pci_dev *pdev = dev->pdev;
+       struct vbt_header *vbt = NULL;
+       struct bdb_header *bdb;
+       u8 __iomem *bios;
+       size_t size;
+       int i;
+
+       bios = pci_map_rom(pdev, &size);
+       if (!bios)
+               return -1;
+
+       /* Scour memory looking for the VBT signature */
+       for (i = 0; i + 4 < size; i++) {
+               if (!memcmp(bios + i, "$VBT", 4)) {
+                       vbt = (struct vbt_header *)(bios + i);
+                       break;
+               }
+       }
+
+       if (!vbt) {
+               dev_err(dev->dev, "VBT signature missing\n");
+               pci_unmap_rom(pdev, bios);
+               return -1;
+       }
+
+       bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+
+       /* Grab useful general definitions */
+       parse_general_features(dev_priv, bdb);
+       parse_lfp_panel_data(dev_priv, bdb);
+       parse_sdvo_panel_data(dev_priv, bdb);
+       parse_backlight_data(dev_priv, bdb);
+
+       pci_unmap_rom(pdev, bios);
+
+       return 0;
+}
+
+/**
+ * Destroy and free VBT data
+ */
+void psb_intel_destroy_bios(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_display_mode *sdvo_lvds_vbt_mode =
+                               dev_priv->sdvo_lvds_vbt_mode;
+       struct drm_display_mode *lfp_lvds_vbt_mode =
+                               dev_priv->lfp_lvds_vbt_mode;
+       struct bdb_lvds_backlight *lvds_bl =
+                               dev_priv->lvds_bl;
+
+       /* Free the SDVO panel mode */
+       if (sdvo_lvds_vbt_mode) {
+               dev_priv->sdvo_lvds_vbt_mode = NULL;
+               kfree(sdvo_lvds_vbt_mode);
+       }
+
+       if (lfp_lvds_vbt_mode) {
+               dev_priv->lfp_lvds_vbt_mode = NULL;
+               kfree(lfp_lvds_vbt_mode);
+       }
+
+       if (lvds_bl) {
+               dev_priv->lvds_bl = NULL;
+               kfree(lvds_bl);
+       }
+}
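
find_section() above treats the BDB as a sequence of blocks, each a 1-byte ID followed by a 2-byte little-endian size and then the payload. A standalone sketch of the same walk over a fabricated buffer (the initial header_size skip is omitted and the data is not a real VBT):

    #include <stdint.h>
    #include <stdio.h>

    /* Walk BDB-style blocks: 1-byte ID, 2-byte little-endian size, payload. */
    static const uint8_t *find_block(const uint8_t *buf, size_t len, uint8_t id,
                                     uint16_t *size)
    {
            size_t index = 0;

            while (index + 3 <= len) {
                    uint8_t  cur_id   = buf[index];
                    uint16_t cur_size = buf[index + 1] | (buf[index + 2] << 8);

                    index += 3;
                    if (cur_id == id) {
                            *size = cur_size;
                            return buf + index;
                    }
                    index += cur_size;
            }
            return NULL;
    }

    int main(void)
    {
            /* id=40 (LVDS options) with a 2-byte payload, then id=42 (LFP data) */
            const uint8_t bdb[] = { 40, 2, 0, 0xAA, 0xBB,  42, 1, 0, 0xCC };
            uint16_t size;
            const uint8_t *blk = find_block(bdb, sizeof(bdb), 42, &size);

            if (blk)
                    printf("found block 42, size %u, first byte 0x%02X\n",
                           size, blk[0]);
            return 0;
    }
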
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
new file mode 100644 (file)
index 0000000..70f1bf0
--- /dev/null
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2006 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _I830_BIOS_H_
+#define _I830_BIOS_H_
+
+#include <drm/drmP.h>
+
+struct vbt_header {
+       u8 signature[20];               /**< Always starts with '$VBT' */
+       u16 version;                    /**< decimal */
+       u16 header_size;                /**< in bytes */
+       u16 vbt_size;                   /**< in bytes */
+       u8 vbt_checksum;
+       u8 reserved0;
+       u32 bdb_offset;                 /**< from beginning of VBT */
+       u32 aim_offset[4];              /**< from beginning of VBT */
+} __attribute__((packed));
+
+
+struct bdb_header {
+       u8 signature[16];               /**< Always 'BIOS_DATA_BLOCK' */
+       u16 version;                    /**< decimal */
+       u16 header_size;                /**< in bytes */
+       u16 bdb_size;                   /**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+       u8 type; /* 0 == desktop, 1 == mobile */
+       u8 relstage;
+       u8 chipset;
+       u8 lvds_present:1;
+       u8 tv_present:1;
+       u8 rsvd2:6; /* finish byte */
+       u8 rsvd3[4];
+       u8 signon[155];
+       u8 copyright[61];
+       u16 code_segment;
+       u8 dos_boot_mode;
+       u8 bandwidth_percent;
+       u8 rsvd4; /* popup memory size */
+       u8 resize_pci_bios;
+       u8 rsvd5; /* is crt already on ddc2 */
+} __attribute__((packed));
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES     1
+#define BDB_GENERAL_DEFINITIONS          2
+#define BDB_OLD_TOGGLE_LIST      3
+#define BDB_MODE_SUPPORT_LIST    4
+#define BDB_GENERIC_MODE_TABLE   5
+#define BDB_EXT_MMIO_REGS        6
+#define BDB_SWF_IO               7
+#define BDB_SWF_MMIO             8
+#define BDB_DOT_CLOCK_TABLE      9
+#define BDB_MODE_REMOVAL_TABLE  10
+#define BDB_CHILD_DEVICE_TABLE  11
+#define BDB_DRIVER_FEATURES     12
+#define BDB_DRIVER_PERSISTENCE  13
+#define BDB_EXT_TABLE_PTRS      14
+#define BDB_DOT_CLOCK_OVERRIDE  15
+#define BDB_DISPLAY_SELECT      16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION     18
+#define BDB_DISPLAY_REMOVE      19
+#define BDB_OEM_CUSTOM          20
+#define BDB_EFP_LIST            21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS   22
+#define BDB_SDVO_PANEL_DTDS     23
+#define BDB_SDVO_LVDS_PNP_IDS   24
+#define BDB_SDVO_LVDS_POWER_SEQ         25
+#define BDB_TV_OPTIONS          26
+#define BDB_LVDS_OPTIONS        40
+#define BDB_LVDS_LFP_DATA_PTRS  41
+#define BDB_LVDS_LFP_DATA       42
+#define BDB_LVDS_BACKLIGHT      43
+#define BDB_LVDS_POWER          44
+#define BDB_SKIP               254 /* VBIOS private block, ignore */
+
+struct bdb_general_features {
+       /* bits 1 */
+       u8 panel_fitting:2;
+       u8 flexaim:1;
+       u8 msg_enable:1;
+       u8 clear_screen:3;
+       u8 color_flip:1;
+
+       /* bits 2 */
+       u8 download_ext_vbt:1;
+       u8 enable_ssc:1;
+       u8 ssc_freq:1;
+       u8 enable_lfp_on_override:1;
+       u8 disable_ssc_ddt:1;
+       u8 rsvd8:3; /* finish byte */
+
+       /* bits 3 */
+       u8 disable_smooth_vision:1;
+       u8 single_dvi:1;
+       u8 rsvd9:6; /* finish byte */
+
+       /* bits 4 */
+       u8 legacy_monitor_detect;
+
+       /* bits 5 */
+       u8 int_crt_support:1;
+       u8 int_tv_support:1;
+       u8 rsvd11:6; /* finish byte */
+} __attribute__((packed));
+
+struct bdb_general_definitions {
+       /* DDC GPIO */
+       u8 crt_ddc_gmbus_pin;
+
+       /* DPMS bits */
+       u8 dpms_acpi:1;
+       u8 skip_boot_crt_detect:1;
+       u8 dpms_aim:1;
+       u8 rsvd1:5; /* finish byte */
+
+       /* boot device bits */
+       u8 boot_display[2];
+       u8 child_dev_size;
+
+       /* device info */
+       u8 tv_or_lvds_info[33];
+       u8 dev1[33];
+       u8 dev2[33];
+       u8 dev3[33];
+       u8 dev4[33];
+       /* may be another device block here on some platforms */
+};
+
+struct bdb_lvds_options {
+       u8 panel_type;
+       u8 rsvd1;
+       /* LVDS capabilities, stored in a dword */
+       u8 pfit_mode:2;
+       u8 pfit_text_mode_enhanced:1;
+       u8 pfit_gfx_mode_enhanced:1;
+       u8 pfit_ratio_auto:1;
+       u8 pixel_dither:1;
+       u8 lvds_edid:1;
+       u8 rsvd2:1;
+       u8 rsvd4;
+} __attribute__((packed));
+
+struct bdb_lvds_backlight {
+       u8 type:2;
+       u8 pol:1;
+       u8 gpio:3;
+       u8 gmbus:2;
+       u16 freq;
+       u8 minbrightness;
+       u8 i2caddr;
+       u8 brightnesscmd;
+       /*FIXME: more...*/
+} __attribute__((packed));
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+       u16 fp_timing_offset; /* offsets are from start of bdb */
+       u8 fp_table_size;
+       u16 dvo_timing_offset;
+       u8 dvo_table_size;
+       u16 panel_pnp_id_offset;
+       u8 pnp_table_size;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_ptrs {
+       u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+       struct bdb_lvds_lfp_data_ptr ptr[16];
+} __attribute__((packed));
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+       u16 x_res;
+       u16 y_res;
+       u32 lvds_reg;
+       u32 lvds_reg_val;
+       u32 pp_on_reg;
+       u32 pp_on_reg_val;
+       u32 pp_off_reg;
+       u32 pp_off_reg_val;
+       u32 pp_cycle_reg;
+       u32 pp_cycle_reg_val;
+       u32 pfit_reg;
+       u32 pfit_reg_val;
+       u16 terminator;
+} __attribute__((packed));
+
+struct lvds_dvo_timing {
+       u16 clock;              /**< In 10khz */
+       u8 hactive_lo;
+       u8 hblank_lo;
+       u8 hblank_hi:4;
+       u8 hactive_hi:4;
+       u8 vactive_lo;
+       u8 vblank_lo;
+       u8 vblank_hi:4;
+       u8 vactive_hi:4;
+       u8 hsync_off_lo;
+       u8 hsync_pulse_width;
+       u8 vsync_pulse_width:4;
+       u8 vsync_off:4;
+       u8 rsvd0:6;
+       u8 hsync_off_hi:2;
+       u8 h_image;
+       u8 v_image;
+       u8 max_hv;
+       u8 h_border;
+       u8 v_border;
+       u8 rsvd1:3;
+       u8 digital:2;
+       u8 vsync_positive:1;
+       u8 hsync_positive:1;
+       u8 rsvd2:1;
+} __attribute__((packed));
+
+struct lvds_pnp_id {
+       u16 mfg_name;
+       u16 product_code;
+       u32 serial;
+       u8 mfg_week;
+       u8 mfg_year;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_entry {
+       struct lvds_fp_timing fp_timing;
+       struct lvds_dvo_timing dvo_timing;
+       struct lvds_pnp_id pnp_id;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data {
+       struct bdb_lvds_lfp_data_entry data[16];
+} __attribute__((packed));
+
+struct aimdb_header {
+       char signature[16];
+       char oem_device[20];
+       u16 aimdb_version;
+       u16 aimdb_header_size;
+       u16 aimdb_size;
+} __attribute__((packed));
+
+struct aimdb_block {
+       u8 aimdb_id;
+       u16 aimdb_size;
+} __attribute__((packed));
+
+struct vch_panel_data {
+       u16 fp_timing_offset;
+       u8 fp_timing_size;
+       u16 dvo_timing_offset;
+       u8 dvo_timing_size;
+       u16 text_fitting_offset;
+       u8 text_fitting_size;
+       u16 graphics_fitting_offset;
+       u8 graphics_fitting_size;
+} __attribute__((packed));
+
+struct vch_bdb_22 {
+       struct aimdb_block aimdb_block;
+       struct vch_panel_data panels[16];
+} __attribute__((packed));
+
+struct bdb_sdvo_lvds_options {
+       u8 panel_backlight;
+       u8 h40_set_panel_type;
+       u8 panel_type;
+       u8 ssc_clk_freq;
+       u16 als_low_trip;
+       u16 als_high_trip;
+       u8 sclalarcoeff_tab_row_num;
+       u8 sclalarcoeff_tab_row_size;
+       u8 coefficient[8];
+       u8 panel_misc_bits_1;
+       u8 panel_misc_bits_2;
+       u8 panel_misc_bits_3;
+       u8 panel_misc_bits_4;
+} __attribute__((packed));
+
+
+extern int psb_intel_init_bios(struct drm_device *dev);
+extern void psb_intel_destroy_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN  (1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK       0x78 /* See also SWF4 15:0 */
+#define   GR18_HK_NONE         (0x0<<3)
+#define   GR18_HK_LFP_STRETCH  (0x1<<3)
+#define   GR18_HK_TOGGLE_DISP  (0x2<<3)
+#define   GR18_HK_DISP_SWITCH  (0x4<<3) /* see SWF14 15:0 for what to enable */
+#define   GR18_HK_POPUP_DISABLED (0x6<<3)
+#define   GR18_HK_POPUP_ENABLED        (0x7<<3)
+#define   GR18_HK_PFIT         (0x8<<3)
+#define   GR18_HK_APM_CHANGE   (0xa<<3)
+#define   GR18_HK_MULTIPLE     (0xc<<3)
+#define GR18_USER_INT_EN       (1<<2)
+#define GR18_A0000_FLUSH_EN    (1<<1)
+#define GR18_SMM_EN            (1<<0)
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT       16
+#define SWF00_XRES_SHIFT       0
+#define SWF00_RES_MASK         0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT 8
+#define SWF01_TV1_FORMAT_SHIFT 0
+#define SWF01_TV_FORMAT_MASK   0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
+#define SWF10_GTT_OVERRIDE_EN  (1<<28)
+#define SWF10_LFP_DPMS_OVR     (1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define   SWF10_OLD_TOGGLE     0x0
+#define   SWF10_TOGGLE_LIST_1  0x1
+#define   SWF10_TOGGLE_LIST_2  0x2
+#define   SWF10_TOGGLE_LIST_3  0x3
+#define   SWF10_TOGGLE_LIST_4  0x4
+#define SWF10_PANNING_EN       (1<<23)
+#define SWF10_DRIVER_LOADED    (1<<22)
+#define SWF10_EXTENDED_DESKTOP (1<<21)
+#define SWF10_EXCLUSIVE_MODE   (1<<20)
+#define SWF10_OVERLAY_EN       (1<<19)
+#define SWF10_PLANEB_HOLDOFF   (1<<18)
+#define SWF10_PLANEA_HOLDOFF   (1<<17)
+#define SWF10_VGA_HOLDOFF      (1<<16)
+#define SWF10_ACTIVE_DISP_MASK 0xffff
+#define   SWF10_PIPEB_LFP2     (1<<15)
+#define   SWF10_PIPEB_EFP2     (1<<14)
+#define   SWF10_PIPEB_TV2      (1<<13)
+#define   SWF10_PIPEB_CRT2     (1<<12)
+#define   SWF10_PIPEB_LFP      (1<<11)
+#define   SWF10_PIPEB_EFP      (1<<10)
+#define   SWF10_PIPEB_TV       (1<<9)
+#define   SWF10_PIPEB_CRT      (1<<8)
+#define   SWF10_PIPEA_LFP2     (1<<7)
+#define   SWF10_PIPEA_EFP2     (1<<6)
+#define   SWF10_PIPEA_TV2      (1<<5)
+#define   SWF10_PIPEA_CRT2     (1<<4)
+#define   SWF10_PIPEA_LFP      (1<<3)
+#define   SWF10_PIPEA_EFP      (1<<2)
+#define   SWF10_PIPEA_TV       (1<<1)
+#define   SWF10_PIPEA_CRT      (1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT        16
+#define SWF11_SV_TEST_EN       (1<<15)
+#define SWF11_IS_AGP           (1<<14)
+#define SWF11_DISPLAY_HOLDOFF  (1<<13)
+#define SWF11_DPMS_REDUCED     (1<<12)
+#define SWF11_IS_VBE_MODE      (1<<11)
+#define SWF11_PIPEB_ACCESS     (1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK                0x07
+#define   SWF11_DPMS_OFF       (1<<2)
+#define   SWF11_DPMS_SUSPEND   (1<<1)
+#define   SWF11_DPMS_STANDBY   (1<<0)
+#define   SWF11_DPMS_ON                0
+
+#define SWF14_GFX_PFIT_EN      (1<<31)
+#define SWF14_TEXT_PFIT_EN     (1<<30)
+#define SWF14_LID_STATUS_CLOSED        (1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN         (1<<28)
+#define SWF14_DISPLAY_HOLDOFF  (1<<27)
+#define SWF14_DISP_DETECT_EN   (1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS    (1<<24)
+#define SWF14_OS_TYPE_WIN9X    (1<<23)
+#define SWF14_OS_TYPE_WINNT    (1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK     0x00070000
+#define   SWF14_PM_ACPI_VIDEO  (0x4 << 16)
+#define   SWF14_PM_ACPI                (0x3 << 16)
+#define   SWF14_PM_APM_12      (0x2 << 16)
+#define   SWF14_PM_APM_11      (0x1 << 16)
+#define SWF14_HK_REQUEST_MASK  0x0000ffff /* see GR18 6:3 for event type */
+         /* if GR18 indicates a display switch */
+#define   SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define   SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define   SWF14_DS_PIPEB_TV2_EN  (1<<13)
+#define   SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define   SWF14_DS_PIPEB_LFP_EN  (1<<11)
+#define   SWF14_DS_PIPEB_EFP_EN  (1<<10)
+#define   SWF14_DS_PIPEB_TV_EN  (1<<9)
+#define   SWF14_DS_PIPEB_CRT_EN  (1<<8)
+#define   SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define   SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define   SWF14_DS_PIPEA_TV2_EN  (1<<5)
+#define   SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define   SWF14_DS_PIPEA_LFP_EN  (1<<3)
+#define   SWF14_DS_PIPEA_EFP_EN  (1<<2)
+#define   SWF14_DS_PIPEA_TV_EN  (1<<1)
+#define   SWF14_DS_PIPEA_CRT_EN  (1<<0)
+         /* if GR18 indicates a panel fitting request */
+#define   SWF14_PFIT_EN                (1<<0) /* 0 means disable */
+         /* if GR18 indicates an APM change request */
+#define   SWF14_APM_HIBERNATE  0x4
+#define   SWF14_APM_SUSPEND    0x3
+#define   SWF14_APM_STANDBY    0x1
+#define   SWF14_APM_RESTORE    0x0
+
+#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
new file mode 100644 (file)
index 0000000..147584a
--- /dev/null
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008,2010 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ *     Chris Wilson <chris@chris-wilson.co.uk>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+#include "psb_intel_drv.h"
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#define _wait_for(COND, MS, W) ({ \
+       unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
+       int ret__ = 0;                                                  \
+       while (! (COND)) {                                              \
+               if (time_after(jiffies, timeout__)) {                   \
+                       ret__ = -ETIMEDOUT;                             \
+                       break;                                          \
+               }                                                       \
+               if (W && !(in_atomic() || in_dbg_master())) msleep(W);  \
+       }                                                               \
+       ret__;                                                          \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
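+/*
+ * wait_for() polls COND for up to MS milliseconds, sleeping roughly 1 ms
+ * between checks unless called from atomic context; wait_for_atomic()
+ * busy-waits instead. Both evaluate to 0 on success or -ETIMEDOUT if COND
+ * never became true.
+ */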
+
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 20
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+       return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+struct intel_gpio {
+       struct i2c_adapter adapter;
+       struct i2c_algo_bit_data algo;
+       struct drm_psb_private *dev_priv;
+       u32 reg;
+};
+
+void
+gma_intel_i2c_reset(struct drm_device *dev)
+{
+       REG_WRITE(GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
+{
+       /* When using bit bashing for I2C, this bit needs to be set to 1 */
+       /* FIXME: We are never Pineview, right?
+
+       u32 val;
+
+       if (!IS_PINEVIEW(dev_priv->dev))
+               return;
+
+       val = REG_READ(DSPCLK_GATE_D);
+       if (enable)
+               val |= DPCUNIT_CLOCK_GATE_DISABLE;
+       else
+               val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+       REG_WRITE(DSPCLK_GATE_D, val);
+
+       return;
+       */
+}
+
+static u32 get_reserved(struct intel_gpio *gpio)
+{
+       struct drm_psb_private *dev_priv = gpio->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       u32 reserved = 0;
+
+       /* On most chips, these bits must be preserved in software. */
+       reserved = REG_READ(gpio->reg) &
+                                    (GPIO_DATA_PULLUP_DISABLE |
+                                     GPIO_CLOCK_PULLUP_DISABLE);
+
+       return reserved;
+}
+
+static int get_clock(void *data)
+{
+       struct intel_gpio *gpio = data;
+       struct drm_psb_private *dev_priv = gpio->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       u32 reserved = get_reserved(gpio);
+       REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+       REG_WRITE(gpio->reg, reserved);
+       return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+       struct intel_gpio *gpio = data;
+       struct drm_psb_private *dev_priv = gpio->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       u32 reserved = get_reserved(gpio);
+       REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+       REG_WRITE(gpio->reg, reserved);
+       return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+       struct intel_gpio *gpio = data;
+       struct drm_psb_private *dev_priv = gpio->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       u32 reserved = get_reserved(gpio);
+       u32 clock_bits;
+
+       if (state_high)
+               clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+       else
+               clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+                       GPIO_CLOCK_VAL_MASK;
+
+       REG_WRITE(gpio->reg, reserved | clock_bits);
+       REG_READ(gpio->reg); /* Posting */
+}
+
+static void set_data(void *data, int state_high)
+{
+       struct intel_gpio *gpio = data;
+       struct drm_psb_private *dev_priv = gpio->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       u32 reserved = get_reserved(gpio);
+       u32 data_bits;
+
+       if (state_high)
+               data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+       else
+               data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+                       GPIO_DATA_VAL_MASK;
+
+       REG_WRITE(gpio->reg, reserved | data_bits);
+       REG_READ(gpio->reg);
+}
+
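+/*
+ * Create a bit-banged fallback adapter on top of one of the GPIO pin pairs.
+ * The returned adapter toggles SCL/SDA through the helpers above and is
+ * used whenever bus->force_bit is set: at setup time while GMBUS is being
+ * debugged, or after a GMBUS transfer times out.
+ */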
+static struct i2c_adapter *
+intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
+{
+       static const int map_pin_to_reg[] = {
+               0,
+               GPIOB,
+               GPIOA,
+               GPIOC,
+               GPIOD,
+               GPIOE,
+               0,
+               GPIOF,
+       };
+       struct intel_gpio *gpio;
+
+       if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
+               return NULL;
+
+       gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+       if (gpio == NULL)
+               return NULL;
+
+       gpio->reg = map_pin_to_reg[pin];
+       gpio->dev_priv = dev_priv;
+
+       snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+                "gma500 GPIO%c", "?BACDE?F"[pin]);
+       gpio->adapter.owner = THIS_MODULE;
+       gpio->adapter.algo_data = &gpio->algo;
+       gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+       gpio->algo.setsda = set_data;
+       gpio->algo.setscl = set_clock;
+       gpio->algo.getsda = get_data;
+       gpio->algo.getscl = get_clock;
+       gpio->algo.udelay = I2C_RISEFALL_TIME;
+       gpio->algo.timeout = usecs_to_jiffies(2200);
+       gpio->algo.data = gpio;
+
+       if (i2c_bit_add_bus(&gpio->adapter))
+               goto out_free;
+
+       return &gpio->adapter;
+
+out_free:
+       kfree(gpio);
+       return NULL;
+}
+
+static int
+intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
+                    struct i2c_adapter *adapter,
+                    struct i2c_msg *msgs,
+                    int num)
+{
+       struct intel_gpio *gpio = container_of(adapter,
+                                              struct intel_gpio,
+                                              adapter);
+       int ret;
+
+       gma_intel_i2c_reset(dev_priv->dev);
+
+       intel_i2c_quirk_set(dev_priv, true);
+       set_data(gpio, 1);
+       set_clock(gpio, 1);
+       udelay(I2C_RISEFALL_TIME);
+
+       ret = adapter->algo->master_xfer(adapter, msgs, num);
+
+       set_data(gpio, 1);
+       set_clock(gpio, 1);
+       intel_i2c_quirk_set(dev_priv, false);
+
+       return ret;
+}
+
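+/*
+ * Run an i2c transaction on the GMBUS controller. Data is streamed four
+ * bytes at a time through GMBUS3 while GMBUS2 is polled for ready/wait
+ * status; a slave NAK (GMBUS_SATOER) resets the controller, and a timeout
+ * falls back to GPIO bit banging for this and all future transfers.
+ */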
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+          struct i2c_msg *msgs,
+          int num)
+{
+       struct intel_gmbus *bus = container_of(adapter,
+                                              struct intel_gmbus,
+                                              adapter);
+       struct drm_psb_private *dev_priv = adapter->algo_data;
+       struct drm_device *dev = dev_priv->dev;
+       int i, reg_offset;
+
+       if (bus->force_bit)
+               return intel_i2c_quirk_xfer(dev_priv,
+                                           bus->force_bit, msgs, num);
+
+       reg_offset = 0;
+
+       REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+       for (i = 0; i < num; i++) {
+               u16 len = msgs[i].len;
+               u8 *buf = msgs[i].buf;
+
+               if (msgs[i].flags & I2C_M_RD) {
+                       REG_WRITE(GMBUS1 + reg_offset,
+                                  GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+                                  (len << GMBUS_BYTE_COUNT_SHIFT) |
+                                  (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+                                  GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+                       REG_READ(GMBUS2+reg_offset);
+                       do {
+                               u32 val, loop = 0;
+
+                               if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+                                       goto timeout;
+                               if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+                                       goto clear_err;
+
+                               val = REG_READ(GMBUS3 + reg_offset);
+                               do {
+                                       *buf++ = val & 0xff;
+                                       val >>= 8;
+                               } while (--len && ++loop < 4);
+                       } while (len);
+               } else {
+                       u32 val, loop;
+
+                       val = loop = 0;
+                       do {
+                               val |= *buf++ << (8 * loop);
+                       } while (--len && ++loop < 4);
+
+                       REG_WRITE(GMBUS3 + reg_offset, val);
+                       REG_WRITE(GMBUS1 + reg_offset,
+                                  (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+                                  (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+                                  (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+                                  GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+                       REG_READ(GMBUS2+reg_offset);
+
+                       while (len) {
+                               if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+                                       goto timeout;
+                               if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+                                       goto clear_err;
+
+                               val = loop = 0;
+                               do {
+                                       val |= *buf++ << (8 * loop);
+                               } while (--len && ++loop < 4);
+
+                               REG_WRITE(GMBUS3 + reg_offset, val);
+                               REG_READ(GMBUS2+reg_offset);
+                       }
+               }
+
+               if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+                       goto timeout;
+               if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+                       goto clear_err;
+       }
+
+       goto done;
+
+clear_err:
+       /* Toggle the Software Clear Interrupt bit. This has the effect
+        * of resetting the GMBUS controller and so clearing the
+        * BUS_ERROR raised by the slave's NAK.
+        */
+       REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+       REG_WRITE(GMBUS1 + reg_offset, 0);
+
+done:
+       /* Mark the GMBUS interface as disabled. We will re-enable it at the
+        * start of the next xfer, till then let it sleep.
+        */
+       REG_WRITE(GMBUS0 + reg_offset, 0);
+       return i;
+
+timeout:
+       DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+                bus->reg0 & 0xff, bus->adapter.name);
+       REG_WRITE(GMBUS0 + reg_offset, 0);
+
+       /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+       bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+       if (!bus->force_bit)
+               return -ENOMEM;
+
+       return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
+}
+
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+       struct intel_gmbus *bus = container_of(adapter,
+                                              struct intel_gmbus,
+                                              adapter);
+
+       if (bus->force_bit)
+               bus->force_bit->algo->functionality(bus->force_bit);
+
+       return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+               /* I2C_FUNC_10BIT_ADDR | */
+               I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+               I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+       .master_xfer    = gmbus_xfer,
+       .functionality  = gmbus_func
+};
+
+/**
+ * gma_intel_setup_gmbus - instantiate all Intel i2c GMBuses
+ * @dev: DRM device
+ */
+int gma_intel_setup_gmbus(struct drm_device *dev)
+{
+       static const char *names[GMBUS_NUM_PORTS] = {
+               "disabled",
+               "ssc",
+               "vga",
+               "panel",
+               "dpc",
+               "dpb",
+               "reserved",
+               "dpd",
+       };
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret, i;
+
+       dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
+                                 GFP_KERNEL);
+       if (dev_priv->gmbus == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+               struct intel_gmbus *bus = &dev_priv->gmbus[i];
+
+               bus->adapter.owner = THIS_MODULE;
+               bus->adapter.class = I2C_CLASS_DDC;
+               snprintf(bus->adapter.name,
+                        sizeof(bus->adapter.name),
+                        "gma500 gmbus %s",
+                        names[i]);
+
+               bus->adapter.dev.parent = &dev->pdev->dev;
+               bus->adapter.algo_data  = dev_priv;
+
+               bus->adapter.algo = &gmbus_algorithm;
+               ret = i2c_add_adapter(&bus->adapter);
+               if (ret)
+                       goto err;
+
+               /* By default use a conservative clock rate */
+               bus->reg0 = i | GMBUS_RATE_100KHZ;
+
+               /* XXX force bit banging until GMBUS is fully debugged */
+               bus->force_bit = intel_gpio_create(dev_priv, i);
+       }
+
+       gma_intel_i2c_reset(dev_priv->dev);
+
+       return 0;
+
+err:
+       while (i--) {
+               struct intel_gmbus *bus = &dev_priv->gmbus[i];
+               i2c_del_adapter(&bus->adapter);
+       }
+       kfree(dev_priv->gmbus);
+       dev_priv->gmbus = NULL;
+       return ret;
+}
+
+void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+       struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+       /* speed:
+        * 0x0 = 100 KHz
+        * 0x1 = 50 KHz
+        * 0x2 = 400 KHz
+        * 0x3 = 1000 KHz
+        */
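+       /* The rate field lands in bits 9:8 of reg0 and is written to
+        * GMBUS0 on the next transfer. */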
+       bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+}
+
+void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+       struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+       if (force_bit) {
+               if (bus->force_bit == NULL) {
+                       struct drm_psb_private *dev_priv = adapter->algo_data;
+                       bus->force_bit = intel_gpio_create(dev_priv,
+                                                          bus->reg0 & 0xff);
+               }
+       } else {
+               if (bus->force_bit) {
+                       i2c_del_adapter(bus->force_bit);
+                       kfree(bus->force_bit);
+                       bus->force_bit = NULL;
+               }
+       }
+}
+
+void gma_intel_teardown_gmbus(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int i;
+
+       if (dev_priv->gmbus == NULL)
+               return;
+
+       for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+               struct intel_gmbus *bus = &dev_priv->gmbus[i];
+               if (bus->force_bit) {
+                       i2c_del_adapter(bus->force_bit);
+                       kfree(bus->force_bit);
+               }
+               i2c_del_adapter(&bus->adapter);
+       }
+
+       kfree(dev_priv->gmbus);
+       dev_priv->gmbus = NULL;
+}
diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
new file mode 100644 (file)
index 0000000..98a28c2
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+/*
+ * Intel GPIO access functions
+ */
+
+#define I2C_RISEFALL_TIME 20
+
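+/*
+ * Open-drain emulation: a line is driven low by programming the GPIO as an
+ * output with the value bit cleared, and released high by switching it back
+ * to an input so the external pull-up takes over.
+ */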
+static int get_clock(void *data)
+{
+       struct psb_intel_i2c_chan *chan = data;
+       struct drm_device *dev = chan->drm_dev;
+       u32 val;
+
+       val = REG_READ(chan->reg);
+       return (val & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+       struct psb_intel_i2c_chan *chan = data;
+       struct drm_device *dev = chan->drm_dev;
+       u32 val;
+
+       val = REG_READ(chan->reg);
+       return (val & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+       struct psb_intel_i2c_chan *chan = data;
+       struct drm_device *dev = chan->drm_dev;
+       u32 reserved = 0, clock_bits;
+
+       /* On most chips, these bits must be preserved in software. */
+       reserved =
+                   REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+                                          GPIO_CLOCK_PULLUP_DISABLE);
+
+       if (state_high)
+               clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+       else
+               clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+                   GPIO_CLOCK_VAL_MASK;
+       REG_WRITE(chan->reg, reserved | clock_bits);
+       udelay(I2C_RISEFALL_TIME);      /* wait for the line to change state */
+}
+
+static void set_data(void *data, int state_high)
+{
+       struct psb_intel_i2c_chan *chan = data;
+       struct drm_device *dev = chan->drm_dev;
+       u32 reserved = 0, data_bits;
+
+       /* On most chips, these bits must be preserved in software. */
+       reserved =
+                   REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+                                          GPIO_CLOCK_PULLUP_DISABLE);
+
+       if (state_high)
+               data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+       else
+               data_bits =
+                   GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+                   GPIO_DATA_VAL_MASK;
+
+       REG_WRITE(chan->reg, reserved | data_bits);
+       udelay(I2C_RISEFALL_TIME);      /* wait for the line to change state */
+}
+
+/**
+ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * @dev: DRM device
+ * @output: driver specific output device
+ * @reg: GPIO reg to use
+ * @name: name for this bus
+ *
+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
+ * in output probing and control (e.g. DDC or SDVO control functions).
+ *
+ * Possible values for @reg include:
+ *   %GPIOA
+ *   %GPIOB
+ *   %GPIOC
+ *   %GPIOD
+ *   %GPIOE
+ *   %GPIOF
+ *   %GPIOG
+ *   %GPIOH
+ * see PRM for details on how these different busses are used.
+ */
+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+                                       const u32 reg, const char *name)
+{
+       struct psb_intel_i2c_chan *chan;
+
+       chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+       if (!chan)
+               goto out_free;
+
+       chan->drm_dev = dev;
+       chan->reg = reg;
+       snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+       chan->adapter.owner = THIS_MODULE;
+       chan->adapter.algo_data = &chan->algo;
+       chan->adapter.dev.parent = &dev->pdev->dev;
+       chan->algo.setsda = set_data;
+       chan->algo.setscl = set_clock;
+       chan->algo.getsda = get_data;
+       chan->algo.getscl = get_clock;
+       chan->algo.udelay = 20;
+       chan->algo.timeout = usecs_to_jiffies(2200);
+       chan->algo.data = chan;
+
+       i2c_set_adapdata(&chan->adapter, chan);
+
+       if (i2c_bit_add_bus(&chan->adapter))
+               goto out_free;
+
+       /* JJJ:  raise SCL and SDA? */
+       set_data(chan, 1);
+       set_clock(chan, 1);
+       udelay(20);
+
+       return chan;
+
+out_free:
+       kfree(chan);
+       return NULL;
+}
+
+/**
+ * psb_intel_i2c_destroy - unregister and free i2c bus resources
+ * @chan: channel to free
+ *
+ * Unregister the adapter from the i2c layer, then free the structure.
+ */
+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
+{
+       if (!chan)
+               return;
+
+       i2c_del_adapter(&chan->adapter);
+       kfree(chan);
+}
diff --git a/drivers/gpu/drm/gma500/intel_opregion.c b/drivers/gpu/drm/gma500/intel_opregion.c
new file mode 100644 (file)
index 0000000..d946bc1
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * FIXME: resolve with the i915 version
+ */
+
+#include "psb_drv.h"
+
+struct opregion_header {
+       u8 signature[16];
+       u32 size;
+       u32 opregion_ver;
+       u8 bios_ver[32];
+       u8 vbios_ver[16];
+       u8 driver_ver[16];
+       u32 mboxes;
+       u8 reserved[164];
+} __packed;
+
+struct opregion_apci {
+       /*FIXME: add it later*/
+} __packed;
+
+struct opregion_swsci {
+       /*FIXME: add it later*/
+} __packed;
+
+struct opregion_acpi {
+       /*FIXME: add it later*/
+} __packed;
+
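+/*
+ * Map the ACPI OpRegion, whose physical address is published at offset 0xfc
+ * of the device's PCI config space, and remember a pointer to the lid-state
+ * word at offset 0x1ac. Nothing else in the region is used yet.
+ */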
+int gma_intel_opregion_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 opregion_phy;
+       void *base;
+       u32 *lid_state;
+
+       dev_priv->lid_state = NULL;
+
+       pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
+       if (opregion_phy == 0)
+               return -ENOTSUPP;
+
+       base = ioremap(opregion_phy, 8*1024);
+       if (!base)
+               return -ENOMEM;
+
+       lid_state = base + 0x01ac;
+
+       dev_priv->lid_state = lid_state;
+       dev_priv->lid_last_state = readl(lid_state);
+       return 0;
+}
+
+int gma_intel_opregion_exit(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       if (dev_priv->lid_state)
+               iounmap(dev_priv->lid_state);
+       return 0;
+}
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
new file mode 100644 (file)
index 0000000..5eee9ad
--- /dev/null
@@ -0,0 +1,263 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* TODO
+ * - Split functions by vbt type
+ * - Make them all take drm_device
+ * - Check ioremap failures
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "mid_bios.h"
+
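+/*
+ * Read the SKU and display fuse settings. The fuse registers are accessed
+ * indirectly through the bus 0, device 0 PCI config space: the register
+ * address is written to offset 0xD0 and the value is read back from 0xD4.
+ */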
+static void mid_get_fuse_settings(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       uint32_t fuse_value = 0;
+       uint32_t fuse_value_tmp = 0;
+
+#define FB_REG06 0xD0810600
+#define FB_MIPI_DISABLE  (1 << 11)
+#define FB_REG09 0xD0810900
+#define FB_SKU_MASK  0x7000
+#define FB_SKU_SHIFT 12
+#define FB_SKU_100 0
+#define FB_SKU_100L 1
+#define FB_SKU_83 2
+       if (pci_root == NULL) {
+               WARN_ON(1);
+               return;
+       }
+
+
+       pci_write_config_dword(pci_root, 0xD0, FB_REG06);
+       pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+       /* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
+       if (IS_MRST(dev))
+               dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
+
+       DRM_INFO("internal display is %s\n",
+                dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
+
+        /* Prevent runtime suspend at start*/
+        if (dev_priv->iLVDS_enable) {
+               dev_priv->is_lvds_on = true;
+               dev_priv->is_mipi_on = false;
+       } else {
+               dev_priv->is_mipi_on = true;
+               dev_priv->is_lvds_on = false;
+       }
+
+       dev_priv->video_device_fuse = fuse_value;
+
+       pci_write_config_dword(pci_root, 0xD0, FB_REG09);
+       pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+       dev_dbg(dev->dev, "SKU value is 0x%x.\n", fuse_value);
+       fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
+
+       dev_priv->fuse_reg_value = fuse_value;
+
+       switch (fuse_value_tmp) {
+       case FB_SKU_100:
+               dev_priv->core_freq = 200;
+               break;
+       case FB_SKU_100L:
+               dev_priv->core_freq = 100;
+               break;
+       case FB_SKU_83:
+               dev_priv->core_freq = 166;
+               break;
+       default:
+               dev_warn(dev->dev, "Invalid SKU value, SKU value = 0x%08x\n",
+                                                               fuse_value_tmp);
+               dev_priv->core_freq = 0;
+       }
+       dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
+       pci_dev_put(pci_root);
+}
+
+/*
+ *     Get the revision ID, B0:D2:F0;0x08
+ */
+static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
+{
+       uint32_t platform_rev_id = 0;
+       struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+       if (pci_gfx_root == NULL) {
+               WARN_ON(1);
+               return;
+       }
+       pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
+       dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
+       pci_dev_put(pci_gfx_root);
+       dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
+                                       dev_priv->platform_rev_id);
+}
+
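+/*
+ * Fetch the firmware GCT ("$GCT") table. Its physical address is published
+ * at config offset 0xFC of B0:D2:F0; the boot panel timings and MIPI
+ * descriptor are copied into dev_priv->gct_data, with the exact layout
+ * depending on the GCT revision (0, 1 or 0x10).
+ */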
+static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+       u32 addr;
+       u16 new_size;
+       u8 *vbt_virtual;
+       u8 bpi;
+       u8 number_desc = 0;
+       struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
+       struct gct_r10_timing_info ti;
+       void *pGCT;
+       struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+       /* Get the address of the platform config vbt, B0:D2:F0;0xFC */
+       pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
+       pci_dev_put(pci_gfx_root);
+
+       dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
+
+       /* check for platform config address == 0. */
+       /* this means fw doesn't support vbt */
+
+       if (addr == 0) {
+               vbt->size = 0;
+               return;
+       }
+
+       /* get the virtual address of the vbt */
+       vbt_virtual = ioremap(addr, sizeof(*vbt));
+       if (vbt_virtual == NULL) {
+               vbt->size = 0;
+               return;
+       }
+
+       memcpy(vbt, vbt_virtual, sizeof(*vbt));
+       iounmap(vbt_virtual); /* Free virtual address space */
+
+       /* No matching signature don't process the data */
+       if (memcmp(vbt->signature, "$GCT", 4)) {
+               vbt->size = 0;
+               return;
+       }
+
+       dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
+
+       switch (vbt->revision) {
+       case 0:
+               vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
+                                       vbt->size - sizeof(*vbt) + 4);
+               pGCT = vbt->oaktrail_gct;
+               bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
+               dev_priv->gct_data.bpi = bpi;
+               dev_priv->gct_data.pt =
+                       ((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
+               memcpy(&dev_priv->gct_data.DTD,
+                       &((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
+                               sizeof(struct oaktrail_timing_info));
+               dev_priv->gct_data.Panel_Port_Control =
+                 ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
+               dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+                       ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+               break;
+       case 1:
+               vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
+                                       vbt->size - sizeof(*vbt) + 4);
+               pGCT = vbt->oaktrail_gct;
+               bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
+               dev_priv->gct_data.bpi = bpi;
+               dev_priv->gct_data.pt =
+                       ((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
+               memcpy(&dev_priv->gct_data.DTD,
+                       &((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
+                               sizeof(struct oaktrail_timing_info));
+               dev_priv->gct_data.Panel_Port_Control =
+                 ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
+               dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+                       ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+               break;
+       case 0x10:
+               /*header definition changed from rev 01 (v2) to rev 10h. */
+               /*so, some values have changed location*/
+               new_size = vbt->checksum; /*checksum contains lo size byte*/
+               /*LSB of oaktrail_gct contains hi size byte*/
+               new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
+
+               vbt->checksum = vbt->size; /*size contains the checksum*/
+               if (new_size > 0xff)
+                       vbt->size = 0xff; /*restrict size to 255*/
+               else
+                       vbt->size = new_size;
+
+               /* number of descriptors defined in the GCT */
+               number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
+               bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
+               vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
+                               GCT_R10_DISPLAY_DESC_SIZE * number_desc);
+               pGCT = vbt->oaktrail_gct;
+               pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
+               dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
+
+               /*copy the GCT display timings into a temp structure*/
+               memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
+
+               /*now copy the temp struct into the dev_priv->gct_data*/
+               dp_ti->pixel_clock = ti.pixel_clock;
+               dp_ti->hactive_hi = ti.hactive_hi;
+               dp_ti->hactive_lo = ti.hactive_lo;
+               dp_ti->hblank_hi = ti.hblank_hi;
+               dp_ti->hblank_lo = ti.hblank_lo;
+               dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
+               dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
+               dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
+               dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
+               dp_ti->vactive_hi = ti.vactive_hi;
+               dp_ti->vactive_lo = ti.vactive_lo;
+               dp_ti->vblank_hi = ti.vblank_hi;
+               dp_ti->vblank_lo = ti.vblank_lo;
+               dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
+               dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
+               dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
+               dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
+
+               /* Move the MIPI_Display_Descriptor data from GCT to dev priv */
+               dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+                                                       *((u8 *)pGCT + 0x0d);
+               dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
+                                               (*((u8 *)pGCT + 0x0e)) << 8;
+               break;
+       default:
+               dev_err(dev->dev, "Unknown revision of GCT!\n");
+               vbt->size = 0;
+       }
+}
+
+int mid_chip_setup(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       mid_get_fuse_settings(dev);
+       mid_get_vbt_data(dev_priv);
+       mid_get_pci_revID(dev_priv);
+       return 0;
+}
diff --git a/drivers/gpu/drm/gma500/mid_bios.h b/drivers/gpu/drm/gma500/mid_bios.h
new file mode 100644 (file)
index 0000000..00e7d56
--- /dev/null
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+extern int mid_chip_setup(struct drm_device *dev);
+
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
new file mode 100644 (file)
index 0000000..c904d73
--- /dev/null
@@ -0,0 +1,858 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+
+/*
+ * Code for the SGX MMU:
+ */
+
+/*
+ * clflush on one processor only:
+ * clflush should apparently flush the cache line on all processors in an
+ * SMP system.
+ */
+
+/*
+ * kmap atomic:
+ * The usage of the slots must be completely encapsulated within a spinlock, and
+ * no other functions that may be using the locks for other purposes may be
+ * called from within the locked region.
+ * Since the slots are per processor, this will guarantee that we are the only
+ * user.
+ */
+
+/*
+ * TODO: Inserting ptes from an interrupt handler:
+ * This may be desirable for some SGX functionality where the GPU can fault in
+ * needed pages. For that, we need to make an atomic insert_pages function that
+ * may fail.
+ * If it fails, the caller needs to insert the page using a workqueue function,
+ * but on average it should be fast.
+ */
+
+struct psb_mmu_driver {
+       /* protects driver- and pd structures. Always take in read mode
+        * before taking the page table spinlock.
+        */
+       struct rw_semaphore sem;
+
+       /* protects page tables, directory tables and pt tables.
+        * and pt structures.
+        */
+       spinlock_t lock;
+
+       atomic_t needs_tlbflush;
+
+       uint8_t __iomem *register_map;
+       struct psb_mmu_pd *default_pd;
+       /*uint32_t bif_ctrl;*/
+       int has_clflush;
+       int clflush_add;
+       unsigned long clflush_mask;
+
+       struct drm_psb_private *dev_priv;
+};
+
+struct psb_mmu_pd;
+
+struct psb_mmu_pt {
+       struct psb_mmu_pd *pd;
+       uint32_t index;
+       uint32_t count;
+       struct page *p;
+       uint32_t *v;
+};
+
+struct psb_mmu_pd {
+       struct psb_mmu_driver *driver;
+       int hw_context;
+       struct psb_mmu_pt **tables;
+       struct page *p;
+       struct page *dummy_pt;
+       struct page *dummy_page;
+       uint32_t pd_mask;
+       uint32_t invalid_pde;
+       uint32_t invalid_pte;
+};
+
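+/*
+ * A GPU virtual address is split two ways: the bits above PSB_PDE_SHIFT
+ * index the page directory, and the ten bits above PSB_PTE_SHIFT index a
+ * PTE within the selected page table.
+ */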
+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
+{
+       return (offset >> PSB_PTE_SHIFT) & 0x3FF;
+}
+
+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
+{
+       return offset >> PSB_PDE_SHIFT;
+}
+
+static inline void psb_clflush(void *addr)
+{
+       __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
+}
+
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
+                                  void *addr)
+{
+       if (!driver->has_clflush)
+               return;
+
+       mb();
+       psb_clflush(addr);
+       mb();
+}
+
+static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
+{
+       uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
+       uint32_t clflush_count = PAGE_SIZE / clflush_add;
+       int i;
+       uint8_t *clf;
+
+       clf = kmap_atomic(page, KM_USER0);
+       mb();
+       for (i = 0; i < clflush_count; ++i) {
+               psb_clflush(clf);
+               clf += clflush_add;
+       }
+       mb();
+       kunmap_atomic(clf, KM_USER0);
+}
+
+static void psb_pages_clflush(struct psb_mmu_driver *driver,
+                               struct page *page[], unsigned long num_pages)
+{
+       int i;
+
+       if (!driver->has_clflush)
+               return ;
+
+       for (i = 0; i < num_pages; i++)
+               psb_page_clflush(driver, *page++);
+}
+
+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
+                                   int force)
+{
+       atomic_set(&driver->needs_tlbflush, 0);
+}
+
+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
+{
+       down_write(&driver->sem);
+       psb_mmu_flush_pd_locked(driver, force);
+       up_write(&driver->sem);
+}
+
+void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
+{
+       if (rc_prot)
+               down_write(&driver->sem);
+       if (rc_prot)
+               up_write(&driver->sem);
+}
+
+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
+{
+       /*ttm_tt_cache_flush(&pd->p, 1);*/
+       psb_pages_clflush(pd->driver, &pd->p, 1);
+       down_write(&pd->driver->sem);
+       wmb();
+       psb_mmu_flush_pd_locked(pd->driver, 1);
+       pd->hw_context = hw_context;
+       up_write(&pd->driver->sem);
+
+}
+
+static inline unsigned long psb_pd_addr_end(unsigned long addr,
+                                           unsigned long end)
+{
+
+       addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
+       return (addr < end) ? addr : end;
+}
+
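+/* Build a PTE: page frame number in the upper bits, PSB_PTE_VALID plus any
+ * cached/read-only/write-only attribute bits in the low bits. */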
+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
+{
+       uint32_t mask = PSB_PTE_VALID;
+
+       if (type & PSB_MMU_CACHED_MEMORY)
+               mask |= PSB_PTE_CACHED;
+       if (type & PSB_MMU_RO_MEMORY)
+               mask |= PSB_PTE_RO;
+       if (type & PSB_MMU_WO_MEMORY)
+               mask |= PSB_PTE_WO;
+
+       return (pfn << PAGE_SHIFT) | mask;
+}
+
+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+                                   int trap_pagefaults, int invalid_type)
+{
+       struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+       uint32_t *v;
+       int i;
+
+       if (!pd)
+               return NULL;
+
+       pd->p = alloc_page(GFP_DMA32);
+       if (!pd->p)
+               goto out_err1;
+       pd->dummy_pt = alloc_page(GFP_DMA32);
+       if (!pd->dummy_pt)
+               goto out_err2;
+       pd->dummy_page = alloc_page(GFP_DMA32);
+       if (!pd->dummy_page)
+               goto out_err3;
+
+       if (!trap_pagefaults) {
+               pd->invalid_pde =
+                   psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+                                    invalid_type);
+               pd->invalid_pte =
+                   psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+                                    invalid_type);
+       } else {
+               pd->invalid_pde = 0;
+               pd->invalid_pte = 0;
+       }
+
+       v = kmap(pd->dummy_pt);
+       for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+               v[i] = pd->invalid_pte;
+
+       kunmap(pd->dummy_pt);
+
+       v = kmap(pd->p);
+       for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+               v[i] = pd->invalid_pde;
+
+       kunmap(pd->p);
+
+       clear_page(kmap(pd->dummy_page));
+       kunmap(pd->dummy_page);
+
+       pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
+       if (!pd->tables)
+               goto out_err4;
+
+       pd->hw_context = -1;
+       pd->pd_mask = PSB_PTE_VALID;
+       pd->driver = driver;
+
+       return pd;
+
+out_err4:
+       __free_page(pd->dummy_page);
+out_err3:
+       __free_page(pd->dummy_pt);
+out_err2:
+       __free_page(pd->p);
+out_err1:
+       kfree(pd);
+       return NULL;
+}
+
+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
+{
+       __free_page(pt->p);
+       kfree(pt);
+}
+
+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
+{
+       struct psb_mmu_driver *driver = pd->driver;
+       struct psb_mmu_pt *pt;
+       int i;
+
+       down_write(&driver->sem);
+       if (pd->hw_context != -1)
+               psb_mmu_flush_pd_locked(driver, 1);
+
+       /* Should take the spinlock here, but we don't need to do that
+          since we have the semaphore in write mode. */
+
+       for (i = 0; i < 1024; ++i) {
+               pt = pd->tables[i];
+               if (pt)
+                       psb_mmu_free_pt(pt);
+       }
+
+       vfree(pd->tables);
+       __free_page(pd->dummy_page);
+       __free_page(pd->dummy_pt);
+       __free_page(pd->p);
+       kfree(pd);
+       up_write(&driver->sem);
+}
+
+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
+{
+       struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
+       void *v;
+       uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
+       uint32_t clflush_count = PAGE_SIZE / clflush_add;
+       spinlock_t *lock = &pd->driver->lock;
+       uint8_t *clf;
+       uint32_t *ptes;
+       int i;
+
+       if (!pt)
+               return NULL;
+
+       pt->p = alloc_page(GFP_DMA32);
+       if (!pt->p) {
+               kfree(pt);
+               return NULL;
+       }
+
+       spin_lock(lock);
+
+       v = kmap_atomic(pt->p, KM_USER0);
+       clf = (uint8_t *) v;
+       ptes = (uint32_t *) v;
+       for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+               *ptes++ = pd->invalid_pte;
+
+
+       if (pd->driver->has_clflush && pd->hw_context != -1) {
+               mb();
+               for (i = 0; i < clflush_count; ++i) {
+                       psb_clflush(clf);
+                       clf += clflush_add;
+               }
+               mb();
+       }
+
+       kunmap_atomic(v, KM_USER0);
+       spin_unlock(lock);
+
+       pt->count = 0;
+       pt->pd = pd;
+       pt->index = 0;
+
+       return pt;
+}
+
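+/*
+ * Look up the page table covering @addr, allocating one if it is missing.
+ * The allocation is done with the lock dropped, so the slot is re-checked
+ * afterwards and a losing racer frees its freshly allocated table. On
+ * success this returns with the page-table page kmapped and the driver lock
+ * held (both released later by psb_mmu_pt_unmap_unlock).
+ */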
+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+                                            unsigned long addr)
+{
+       uint32_t index = psb_mmu_pd_index(addr);
+       struct psb_mmu_pt *pt;
+       uint32_t *v;
+       spinlock_t *lock = &pd->driver->lock;
+
+       spin_lock(lock);
+       pt = pd->tables[index];
+       while (!pt) {
+               spin_unlock(lock);
+               pt = psb_mmu_alloc_pt(pd);
+               if (!pt)
+                       return NULL;
+               spin_lock(lock);
+
+               if (pd->tables[index]) {
+                       spin_unlock(lock);
+                       psb_mmu_free_pt(pt);
+                       spin_lock(lock);
+                       pt = pd->tables[index];
+                       continue;
+               }
+
+               v = kmap_atomic(pd->p, KM_USER0);
+               pd->tables[index] = pt;
+               v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
+               pt->index = index;
+               kunmap_atomic((void *) v, KM_USER0);
+
+               if (pd->hw_context != -1) {
+                       psb_mmu_clflush(pd->driver, (void *) &v[index]);
+                       atomic_set(&pd->driver->needs_tlbflush, 1);
+               }
+       }
+       pt->v = kmap_atomic(pt->p, KM_USER0);
+       return pt;
+}
+
+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
+                                             unsigned long addr)
+{
+       uint32_t index = psb_mmu_pd_index(addr);
+       struct psb_mmu_pt *pt;
+       spinlock_t *lock = &pd->driver->lock;
+
+       spin_lock(lock);
+       pt = pd->tables[index];
+       if (!pt) {
+               spin_unlock(lock);
+               return NULL;
+       }
+       pt->v = kmap_atomic(pt->p, KM_USER0);
+       return pt;
+}
+
+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
+{
+       struct psb_mmu_pd *pd = pt->pd;
+       uint32_t *v;
+
+       kunmap_atomic(pt->v, KM_USER0);
+       if (pt->count == 0) {
+               v = kmap_atomic(pd->p, KM_USER0);
+               v[pt->index] = pd->invalid_pde;
+               pd->tables[pt->index] = NULL;
+
+               if (pd->hw_context != -1) {
+                       psb_mmu_clflush(pd->driver,
+                                       (void *) &v[pt->index]);
+                       atomic_set(&pd->driver->needs_tlbflush, 1);
+               }
+               kunmap_atomic(v, KM_USER0);
+               spin_unlock(&pd->driver->lock);
+               psb_mmu_free_pt(pt);
+               return;
+       }
+       spin_unlock(&pd->driver->lock);
+}
+
+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
+                                  unsigned long addr, uint32_t pte)
+{
+       pt->v[psb_mmu_pt_index(addr)] = pte;
+}
+
+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
+                                         unsigned long addr)
+{
+       pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
+}
+
+
+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
+                       uint32_t mmu_offset, uint32_t gtt_start,
+                       uint32_t gtt_pages)
+{
+       uint32_t *v;
+       uint32_t start = psb_mmu_pd_index(mmu_offset);
+       struct psb_mmu_driver *driver = pd->driver;
+       int num_pages = gtt_pages;
+
+       down_read(&driver->sem);
+       spin_lock(&driver->lock);
+
+       v = kmap_atomic(pd->p, KM_USER0);
+       v += start;
+
+       while (gtt_pages--) {
+               *v++ = gtt_start | pd->pd_mask;
+               gtt_start += PAGE_SIZE;
+       }
+
+       /*ttm_tt_cache_flush(&pd->p, num_pages);*/
+       psb_pages_clflush(pd->driver, &pd->p, num_pages);
+       kunmap_atomic(v, KM_USER0);
+       spin_unlock(&driver->lock);
+
+       if (pd->hw_context != -1)
+               atomic_set(&pd->driver->needs_tlbflush, 1);
+
+       up_read(&pd->driver->sem);
+       psb_mmu_flush_pd(pd->driver, 0);
+}
+
+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
+{
+       struct psb_mmu_pd *pd;
+
+       /* down_read(&driver->sem); */
+       pd = driver->default_pd;
+       /* up_read(&driver->sem); */
+
+       return pd;
+}
+
+/* Returns the physical address of the PD shared by sgx/msvdx */
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
+{
+       struct psb_mmu_pd *pd;
+
+       pd = psb_mmu_get_default_pd(driver);
+       return page_to_pfn(pd->p) << PAGE_SHIFT;
+}
+
+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
+{
+       psb_mmu_free_pagedir(driver->default_pd);
+       kfree(driver);
+}
+
+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+                                       int trap_pagefaults,
+                                       int invalid_type,
+                                       struct drm_psb_private *dev_priv)
+{
+       struct psb_mmu_driver *driver;
+
+       driver = kmalloc(sizeof(*driver), GFP_KERNEL);
+
+       if (!driver)
+               return NULL;
+       driver->dev_priv = dev_priv;
+
+       driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
+                                             invalid_type);
+       if (!driver->default_pd)
+               goto out_err1;
+
+       spin_lock_init(&driver->lock);
+       init_rwsem(&driver->sem);
+       down_write(&driver->sem);
+       driver->register_map = registers;
+       atomic_set(&driver->needs_tlbflush, 1);
+
+       driver->has_clflush = 0;
+
+       if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
+               uint32_t tfms, misc, cap0, cap4, clflush_size;
+
+               /*
+                * clflush size is determined at kernel setup for x86_64
+                *  but not for i386. We have to do it here.
+                */
+
+               cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
+               clflush_size = ((misc >> 8) & 0xff) * 8;
+               driver->has_clflush = 1;
+               driver->clflush_add =
+                   PAGE_SIZE * clflush_size / sizeof(uint32_t);
+               driver->clflush_mask = driver->clflush_add - 1;
+               driver->clflush_mask = ~driver->clflush_mask;
+       }
+
+       up_write(&driver->sem);
+       return driver;
+
+out_err1:
+       kfree(driver);
+       return NULL;
+}
+
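+/*
+ * Flush the CPU cache lines that back the PTEs of a (possibly tiled)
+ * address range so the hardware sees up-to-date entries.
+ */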
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
+                              unsigned long address, uint32_t num_pages,
+                              uint32_t desired_tile_stride,
+                              uint32_t hw_tile_stride)
+{
+       struct psb_mmu_pt *pt;
+       uint32_t rows = 1;
+       uint32_t i;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long add;
+       unsigned long row_add;
+       unsigned long clflush_add = pd->driver->clflush_add;
+       unsigned long clflush_mask = pd->driver->clflush_mask;
+
+       if (!pd->driver->has_clflush) {
+               /*ttm_tt_cache_flush(&pd->p, num_pages);*/
+               psb_pages_clflush(pd->driver, &pd->p, num_pages);
+               return;
+       }
+
+       if (hw_tile_stride)
+               rows = num_pages / desired_tile_stride;
+       else
+               desired_tile_stride = num_pages;
+
+       add = desired_tile_stride << PAGE_SHIFT;
+       row_add = hw_tile_stride << PAGE_SHIFT;
+       mb();
+       for (i = 0; i < rows; ++i) {
+
+               addr = address;
+               end = addr + add;
+
+               do {
+                       next = psb_pd_addr_end(addr, end);
+                       pt = psb_mmu_pt_map_lock(pd, addr);
+                       if (!pt)
+                               continue;
+                       do {
+                               psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
+                       } while (addr += clflush_add,
+                                (addr & clflush_mask) < next);
+
+                       psb_mmu_pt_unmap_unlock(pt);
+               } while (addr = next, next != end);
+               address += row_add;
+       }
+       mb();
+}
+
+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+                                unsigned long address, uint32_t num_pages)
+{
+       struct psb_mmu_pt *pt;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long f_address = address;
+
+       down_read(&pd->driver->sem);
+
+       addr = address;
+       end = addr + (num_pages << PAGE_SHIFT);
+
+       do {
+               next = psb_pd_addr_end(addr, end);
+               pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+               if (!pt)
+                       goto out;
+               do {
+                       psb_mmu_invalidate_pte(pt, addr);
+                       --pt->count;
+               } while (addr += PAGE_SIZE, addr < next);
+               psb_mmu_pt_unmap_unlock(pt);
+
+       } while (addr = next, next != end);
+
+out:
+       if (pd->hw_context != -1)
+               psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               psb_mmu_flush(pd->driver, 0);
+
+       return;
+}
+
+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
+                         uint32_t num_pages, uint32_t desired_tile_stride,
+                         uint32_t hw_tile_stride)
+{
+       struct psb_mmu_pt *pt;
+       uint32_t rows = 1;
+       uint32_t i;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long add;
+       unsigned long row_add;
+       unsigned long f_address = address;
+
+       if (hw_tile_stride)
+               rows = num_pages / desired_tile_stride;
+       else
+               desired_tile_stride = num_pages;
+
+       add = desired_tile_stride << PAGE_SHIFT;
+       row_add = hw_tile_stride << PAGE_SHIFT;
+
+       /* down_read(&pd->driver->sem); */
+
+       /* Make sure we only need to flush this processor's cache */
+
+       for (i = 0; i < rows; ++i) {
+
+               addr = address;
+               end = addr + add;
+
+               do {
+                       next = psb_pd_addr_end(addr, end);
+                       pt = psb_mmu_pt_map_lock(pd, addr);
+                       if (!pt)
+                               continue;
+                       do {
+                               psb_mmu_invalidate_pte(pt, addr);
+                               --pt->count;
+
+                       } while (addr += PAGE_SIZE, addr < next);
+                       psb_mmu_pt_unmap_unlock(pt);
+
+               } while (addr = next, next != end);
+               address += row_add;
+       }
+       if (pd->hw_context != -1)
+               psb_mmu_flush_ptes(pd, f_address, num_pages,
+                                  desired_tile_stride, hw_tile_stride);
+
+       /* up_read(&pd->driver->sem); */
+
+       if (pd->hw_context != -1)
+               psb_mmu_flush(pd->driver, 0);
+}
+
+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
+                               unsigned long address, uint32_t num_pages,
+                               int type)
+{
+       struct psb_mmu_pt *pt;
+       uint32_t pte;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long f_address = address;
+       int ret = 0;
+
+       down_read(&pd->driver->sem);
+
+       addr = address;
+       end = addr + (num_pages << PAGE_SHIFT);
+
+       do {
+               next = psb_pd_addr_end(addr, end);
+               pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+               if (!pt) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               do {
+                       pte = psb_mmu_mask_pte(start_pfn++, type);
+                       psb_mmu_set_pte(pt, addr, pte);
+                       pt->count++;
+               } while (addr += PAGE_SIZE, addr < next);
+               psb_mmu_pt_unmap_unlock(pt);
+
+       } while (addr = next, next != end);
+
+out:
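+       /*
+        * Flush the CPU cache lines holding the updated PTEs while the
+        * mapping is still protected by the semaphore, then flush the
+        * hardware MMU so it picks up the new entries.
+        */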
+       if (pd->hw_context != -1)
+               psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               psb_mmu_flush(pd->driver, 1);
+
+       return ret;
+}
+
+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+                        unsigned long address, uint32_t num_pages,
+                        uint32_t desired_tile_stride,
+                        uint32_t hw_tile_stride, int type)
+{
+       struct psb_mmu_pt *pt;
+       uint32_t rows = 1;
+       uint32_t i;
+       uint32_t pte;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long add;
+       unsigned long row_add;
+       unsigned long f_address = address;
+       int ret = 0;
+
+       if (hw_tile_stride) {
+               if (num_pages % desired_tile_stride != 0)
+                       return -EINVAL;
+               rows = num_pages / desired_tile_stride;
+       } else {
+               desired_tile_stride = num_pages;
+       }
+
+       add = desired_tile_stride << PAGE_SHIFT;
+       row_add = hw_tile_stride << PAGE_SHIFT;
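+       /*
+        * With hw_tile_stride set, the pages are written as 'rows' rows of
+        * desired_tile_stride pages each, the GPU address advancing by
+        * hw_tile_stride pages between rows.
+        */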
+
+       down_read(&pd->driver->sem);
+
+       for (i = 0; i < rows; ++i) {
+
+               addr = address;
+               end = addr + add;
+
+               do {
+                       next = psb_pd_addr_end(addr, end);
+                       pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+                       if (!pt) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+                       do {
+                               pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
+                                                      type);
+                               psb_mmu_set_pte(pt, addr, pte);
+                               pt->count++;
+                       } while (addr += PAGE_SIZE, addr < next);
+                       psb_mmu_pt_unmap_unlock(pt);
+
+               } while (addr = next, next != end);
+
+               address += row_add;
+       }
+out:
+       if (pd->hw_context != -1)
+               psb_mmu_flush_ptes(pd, f_address, num_pages,
+                                  desired_tile_stride, hw_tile_stride);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               psb_mmu_flush(pd->driver, 1);
+
+       return ret;
+}
+
+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+                          unsigned long *pfn)
+{
+       int ret;
+       struct psb_mmu_pt *pt;
+       uint32_t tmp;
+       spinlock_t *lock = &pd->driver->lock;
+
+       down_read(&pd->driver->sem);
+       pt = psb_mmu_pt_map_lock(pd, virtual);
+       if (!pt) {
+               uint32_t *v;
+
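+               /*
+                * No page table is mapped at this address: inspect the raw
+                * PDE and report the pfn encoded in the driver's invalid
+                * PTE if that is what backs this range.
+                */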
+               spin_lock(lock);
+               v = kmap_atomic(pd->p, KM_USER0);
+               tmp = v[psb_mmu_pd_index(virtual)];
+               kunmap_atomic(v, KM_USER0);
+               spin_unlock(lock);
+
+               if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
+                   !(pd->invalid_pte & PSB_PTE_VALID)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               ret = 0;
+               *pfn = pd->invalid_pte >> PAGE_SHIFT;
+               goto out;
+       }
+       tmp = pt->v[psb_mmu_pt_index(virtual)];
+       if (!(tmp & PSB_PTE_VALID)) {
+               ret = -EINVAL;
+       } else {
+               ret = 0;
+               *pfn = tmp >> PAGE_SHIFT;
+       }
+       psb_mmu_pt_unmap_unlock(pt);
+out:
+       up_read(&pd->driver->sem);
+       return ret;
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
new file mode 100644 (file)
index 0000000..2da1f36
--- /dev/null
@@ -0,0 +1,252 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* MID device specific descriptors */
+
+struct oaktrail_vbt {
+       s8 signature[4];        /*4 bytes,"$GCT" */
+       u8 revision;
+       u8 size;
+       u8 checksum;
+       void *oaktrail_gct;
+} __packed;
+
+struct oaktrail_timing_info {
+       u16 pixel_clock;
+       u8 hactive_lo;
+       u8 hblank_lo;
+       u8 hblank_hi:4;
+       u8 hactive_hi:4;
+       u8 vactive_lo;
+       u8 vblank_lo;
+       u8 vblank_hi:4;
+       u8 vactive_hi:4;
+       u8 hsync_offset_lo;
+       u8 hsync_pulse_width_lo;
+       u8 vsync_pulse_width_lo:4;
+       u8 vsync_offset_lo:4;
+       u8 vsync_pulse_width_hi:2;
+       u8 vsync_offset_hi:2;
+       u8 hsync_pulse_width_hi:2;
+       u8 hsync_offset_hi:2;
+       u8 width_mm_lo;
+       u8 height_mm_lo;
+       u8 height_mm_hi:4;
+       u8 width_mm_hi:4;
+       u8 hborder;
+       u8 vborder;
+       u8 unknown0:1;
+       u8 hsync_positive:1;
+       u8 vsync_positive:1;
+       u8 separate_sync:2;
+       u8 stereo:1;
+       u8 unknown6:1;
+       u8 interlaced:1;
+} __packed;
+
+struct gct_r10_timing_info {
+       u16 pixel_clock;
+       u32 hactive_lo:8;
+       u32 hactive_hi:4;
+       u32 hblank_lo:8;
+       u32 hblank_hi:4;
+       u32 hsync_offset_lo:8;
+       u16 hsync_offset_hi:2;
+       u16 hsync_pulse_width_lo:8;
+       u16 hsync_pulse_width_hi:2;
+       u16 hsync_positive:1;
+       u16 rsvd_1:3;
+       u8  vactive_lo:8;
+       u16 vactive_hi:4;
+       u16 vblank_lo:8;
+       u16 vblank_hi:4;
+       u16 vsync_offset_lo:4;
+       u16 vsync_offset_hi:2;
+       u16 vsync_pulse_width_lo:4;
+       u16 vsync_pulse_width_hi:2;
+       u16 vsync_positive:1;
+       u16 rsvd_2:3;
+} __packed;
+
+struct oaktrail_panel_descriptor_v1 {
+       u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+                               /* 0x61190 if MIPI */
+       u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+       u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+       u32 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
+                                               /* Register 0x61210 */
+       struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
+       u16 Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
+                               /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
+                       /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
+       u16 Panel_MIPI_Display_Descriptor;
+                       /*16 bits, Defined as follows: */
+                       /* if MIPI, 0x0000 if LVDS */
+                       /* Bit 0, Type, 2 bits, */
+                       /* 0: Type-1, */
+                       /* 1: Type-2, */
+                       /* 2: Type-3, */
+                       /* 3: Type-4 */
+                       /* Bit 2, Pixel Format, 4 bits */
+                       /* Bit0: 16bpp (not supported in LNC), */
+                       /* Bit1: 18bpp loosely packed, */
+                       /* Bit2: 18bpp packed, */
+                       /* Bit3: 24bpp */
+                       /* Bit 6, Reserved, 2 bits, 00b */
+                       /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+                       /* Bit 14, Reserved, 2 bits, 00b */
+} __packed;
+
+struct oaktrail_panel_descriptor_v2 {
+       u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+                               /* 0x61190 if MIPI */
+       u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+       u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+       u8 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
+                                               /* Register 0x61210 */
+       struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
+       u16 Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
+                               /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
+       u8 Panel_Initial_Brightness;/* [7:0] 0 - 100% */
+                       /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
+       u16 Panel_MIPI_Display_Descriptor;
+                       /*16 bits, Defined as follows: */
+                       /* if MIPI, 0x0000 if LVDS */
+                       /* Bit 0, Type, 2 bits, */
+                       /* 0: Type-1, */
+                       /* 1: Type-2, */
+                       /* 2: Type-3, */
+                       /* 3: Type-4 */
+                       /* Bit 2, Pixel Format, 4 bits */
+                       /* Bit0: 16bpp (not supported in LNC), */
+                       /* Bit1: 18bpp loosely packed, */
+                       /* Bit2: 18bpp packed, */
+                       /* Bit3: 24bpp */
+                       /* Bit 6, Reserved, 2 bits, 00b */
+                       /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+                       /* Bit 14, Reserved, 2 bits, 00b */
+} __packed;
+
+union oaktrail_panel_rx {
+       struct {
+               u16 NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
+                       /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
+               u16 MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
+               /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
+               u16 SupportedVideoTransferMode:2; /*0: Non-burst only */
+                                       /* 1: Burst and non-burst */
+                                       /* 2/3: Reserved */
+               u16 HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
+               u16 DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
+               u16 ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
+               u16 BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
+               u16 Rsvd:5;/*5 bits,00000b */
+       } panelrx;
+       u16 panel_receiver;
+} __packed;
+
+struct oaktrail_gct_v1 {
+       union { /*8 bits,Defined as follows: */
+               struct {
+                       u8 PanelType:4; /*4 bits, Bit field for panels*/
+                                       /* 0 - 3: 0 = LVDS, 1 = MIPI*/
+                       u8 BootPanelIndex:2;
+                                       /* 2 bits, specifies which of the */
+                                       /* 4 panels to use by default */
+                       u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+                                       /* the 4 MIPI DSI receivers to use*/
+               } PD;
+               u8 PanelDescriptor;
+       };
+       struct oaktrail_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
+       union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
+} __packed;
+
+struct oaktrail_gct_v2 {
+       union { /*8 bits,Defined as follows: */
+               struct {
+                       u8 PanelType:4; /*4 bits, Bit field for panels*/
+                                       /* 0 - 3: 0 = LVDS, 1 = MIPI*/
+                       u8 BootPanelIndex:2;
+                                       /* 2 bits, specifies which of the */
+                                       /* 4 panels to use by default */
+                       u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+                                       /* the 4 MIPI DSI receivers to use*/
+               } PD;
+               u8 PanelDescriptor;
+       };
+       struct oaktrail_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
+       union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
+} __packed;
+
+struct oaktrail_gct_data {
+       u8 bpi; /* boot panel index, which panel is used during boot */
+       u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
+       struct oaktrail_timing_info DTD; /* timing info for the selected panel */
+       u32 Panel_Port_Control;
+       u32 PP_On_Sequencing;/*1 dword,Register 0x61208,*/
+       u32 PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+       u32 PP_Cycle_Delay;
+       u16 Panel_Backlight_Inverter_Descriptor;
+       u16 Panel_MIPI_Display_Descriptor;
+} __packed;
+
+#define MODE_SETTING_IN_CRTC           0x1
+#define MODE_SETTING_IN_ENCODER                0x2
+#define MODE_SETTING_ON_GOING          0x3
+#define MODE_SETTING_IN_DSR            0x4
+#define MODE_SETTING_ENCODER_DONE      0x8
+
+#define GCT_R10_HEADER_SIZE            16
+#define GCT_R10_DISPLAY_DESC_SIZE      28
+
+/*
+ *     Moorestown HDMI interfaces
+ */
+
+struct oaktrail_hdmi_dev {
+       struct pci_dev *dev;
+       void __iomem *regs;
+       unsigned int mmio, mmio_len;
+       int dpms_mode;
+       struct hdmi_i2c_dev *i2c_dev;
+
+       /* register state */
+       u32 saveDPLL_CTRL;
+       u32 saveDPLL_DIV_CTRL;
+       u32 saveDPLL_ADJUST;
+       u32 saveDPLL_UPDATE;
+       u32 saveDPLL_CLK_ENABLE;
+       u32 savePCH_HTOTAL_B;
+       u32 savePCH_HBLANK_B;
+       u32 savePCH_HSYNC_B;
+       u32 savePCH_VTOTAL_B;
+       u32 savePCH_VBLANK_B;
+       u32 savePCH_VSYNC_B;
+       u32 savePCH_PIPEBCONF;
+       u32 savePCH_PIPEBSRC;
+};
+
+extern void oaktrail_hdmi_setup(struct drm_device *dev);
+extern void oaktrail_hdmi_teardown(struct drm_device *dev);
+extern int  oaktrail_hdmi_i2c_init(struct pci_dev *dev);
+extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
+extern void oaktrail_hdmi_save(struct drm_device *dev);
+extern void oaktrail_hdmi_restore(struct drm_device *dev);
+extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
new file mode 100644 (file)
index 0000000..9d12a3e
--- /dev/null
@@ -0,0 +1,604 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+
+struct psb_intel_range_t {
+       int min, max;
+};
+
+struct oaktrail_limit_t {
+       struct psb_intel_range_t dot, m, p1;
+};
+
+struct oaktrail_clock_t {
+       /* derived values */
+       int dot;
+       int m;
+       int p1;
+};
+
+#define MRST_LIMIT_LVDS_100L       0
+#define MRST_LIMIT_LVDS_83         1
+#define MRST_LIMIT_LVDS_100        2
+
+#define MRST_DOT_MIN             19750
+#define MRST_DOT_MAX             120000
+#define MRST_M_MIN_100L                    20
+#define MRST_M_MIN_100             10
+#define MRST_M_MIN_83              12
+#define MRST_M_MAX_100L                    34
+#define MRST_M_MAX_100             17
+#define MRST_M_MAX_83              20
+#define MRST_P1_MIN                2
+#define MRST_P1_MAX_0              7
+#define MRST_P1_MAX_1              8
+
+static const struct oaktrail_limit_t oaktrail_limits[] = {
+       {                       /* MRST_LIMIT_LVDS_100L */
+        .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+        .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
+        .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+        },
+       {                       /* MRST_LIMIT_LVDS_83 */
+        .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+        .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
+        .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
+        },
+       {                       /* MRST_LIMIT_LVDS_100 */
+        .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+        .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
+        .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+        },
+};
+
+#define MRST_M_MIN         10
+static const u32 oaktrail_m_converts[] = {
+       0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
+       0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
+       0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
+};
+
+static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
+{
+       const struct oaktrail_limit_t *limit = NULL;
+       struct drm_device *dev = crtc->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+           || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
+               switch (dev_priv->core_freq) {
+               case 100:
+                       limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
+                       break;
+               case 166:
+                       limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
+                       break;
+               case 200:
+                       limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
+                       break;
+               }
+       } else {
+               limit = NULL;
+               dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
+       }
+
+       return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
+{
+       clock->dot = (refclk * clock->m) / (14 * clock->p1);
+}
+
+void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
+{
+       pr_debug("%s: dotclock = %d,  m = %d, p1 = %d.\n",
+            prefix, clock->dot, clock->m, clock->p1);
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false if no suitable combination is found within the platform
+ * limits.  Divisor values are the actual hardware divisors.
+ */
+static bool
+mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+               struct oaktrail_clock_t *best_clock)
+{
+       struct oaktrail_clock_t clock;
+       const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
+       int err = target;
+
+       memset(best_clock, 0, sizeof(*best_clock));
+
+       for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+               for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+                    clock.p1++) {
+                       int this_err;
+
+                       oaktrail_clock(refclk, &clock);
+
+                       this_err = abs(clock.dot - target);
+                       if (this_err < err) {
+                               *best_clock = clock;
+                               err = this_err;
+                       }
+               }
+       }
+       dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
+       return err != target;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       u32 temp;
+       bool enabled;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       /* XXX: When our outputs are all unaware of DPMS modes other than off
+        * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+        */
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+               /* Enable the DPLL */
+               temp = REG_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) == 0) {
+                       REG_WRITE(dpll_reg, temp);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+               }
+               /* Enable the pipe */
+               temp = REG_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) == 0)
+                       REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+               /* Enable the plane */
+               temp = REG_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+                       REG_WRITE(dspcntr_reg,
+                                 temp | DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+               }
+
+               psb_intel_crtc_load_lut(crtc);
+
+               /* Give the overlay scaler a chance to enable
+                  if it's on this pipe */
+               /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+               break;
+       case DRM_MODE_DPMS_OFF:
+               /* Give the overlay scaler a chance to disable
+                * if it's on this pipe */
+               /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+               /* Disable the VGA plane that we never use */
+               REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+               /* Disable display plane */
+               temp = REG_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+                       REG_WRITE(dspcntr_reg,
+                                 temp & ~DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+                       REG_READ(dspbase_reg);
+               }
+
+               /* Next, disable display pipes */
+               temp = REG_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+                       REG_READ(pipeconf_reg);
+               }
+               /* Wait for the pipe disable to take effect. */
+               psb_intel_wait_for_vblank(dev);
+
+               temp = REG_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) != 0) {
+                       REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+               }
+
+               /* Wait for the clocks to turn off. */
+               udelay(150);
+               break;
+       }
+
+       enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+       /*Set FIFO Watermarks*/
+       REG_WRITE(DSPARB, 0x3FFF);
+       REG_WRITE(DSPFW1, 0x3F88080A);
+       REG_WRITE(DSPFW2, 0x0b060808);
+       REG_WRITE(DSPFW3, 0x0);
+       REG_WRITE(DSPFW4, 0x08030404);
+       REG_WRITE(DSPFW5, 0x04040404);
+       REG_WRITE(DSPFW6, 0x78);
+       REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
+       /* Must write Bit 14 of the Chicken Bit Register */
+
+       gma_power_end(dev);
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int oaktrail_panel_fitter_pipe(struct drm_device *dev)
+{
+       u32 pfit_control;
+
+       pfit_control = REG_READ(PFIT_CONTROL);
+
+       /* See if the panel fitter is in use */
+       if ((pfit_control & PFIT_ENABLE) == 0)
+               return -1;
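+
+       /* The bound pipe is encoded in bits 30:29 of PFIT_CONTROL */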
+       return (pfit_control >> 29) & 3;
+}
+
+static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
+                             struct drm_display_mode *mode,
+                             struct drm_display_mode *adjusted_mode,
+                             int x, int y,
+                             struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int pipe = psb_intel_crtc->pipe;
+       int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
+       int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int refclk = 0;
+       struct oaktrail_clock_t clock;
+       u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+       bool ok, is_sdvo = false;
+       bool is_crt = false, is_lvds = false, is_tv = false;
+       bool is_mipi = false;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct psb_intel_encoder *psb_intel_encoder = NULL;
+       uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+       struct drm_connector *connector;
+
+       if (!gma_power_begin(dev, true))
+               return 0;
+
+       memcpy(&psb_intel_crtc->saved_mode,
+               mode,
+               sizeof(struct drm_display_mode));
+       memcpy(&psb_intel_crtc->saved_adjusted_mode,
+               adjusted_mode,
+               sizeof(struct drm_display_mode));
+
+       list_for_each_entry(connector, &mode_config->connector_list, head) {
+               if (!connector->encoder || connector->encoder->crtc != crtc)
+                       continue;
+
+               psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+               switch (psb_intel_encoder->type) {
+               case INTEL_OUTPUT_LVDS:
+                       is_lvds = true;
+                       break;
+               case INTEL_OUTPUT_SDVO:
+                       is_sdvo = true;
+                       break;
+               case INTEL_OUTPUT_TVOUT:
+                       is_tv = true;
+                       break;
+               case INTEL_OUTPUT_ANALOG:
+                       is_crt = true;
+                       break;
+               case INTEL_OUTPUT_MIPI:
+                       is_mipi = true;
+                       break;
+               }
+       }
+
+       /* Disable the VGA plane that we never use */
+       REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+       /* Disable the panel fitter if it was on our pipe */
+       if (oaktrail_panel_fitter_pipe(dev) == pipe)
+               REG_WRITE(PFIT_CONTROL, 0);
+
+       REG_WRITE(pipesrc_reg,
+                 ((mode->crtc_hdisplay - 1) << 16) |
+                 (mode->crtc_vdisplay - 1));
+
+       if (psb_intel_encoder)
+               drm_connector_property_get_value(connector,
+                       dev->mode_config.scaling_mode_property, &scalingType);
+
+       if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+               /* Moorestown doesn't have register support for centering so
+                * we need to mess with the h/vblank and h/vsync start and
+                * ends to get centering */
+               int offsetX = 0, offsetY = 0;
+
+               offsetX = (adjusted_mode->crtc_hdisplay -
+                          mode->crtc_hdisplay) / 2;
+               offsetY = (adjusted_mode->crtc_vdisplay -
+                          mode->crtc_vdisplay) / 2;
+
+               REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+                       ((adjusted_mode->crtc_htotal - 1) << 16));
+               REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+                       ((adjusted_mode->crtc_vtotal - 1) << 16));
+               REG_WRITE(hblank_reg,
+                       (adjusted_mode->crtc_hblank_start - offsetX - 1) |
+                       ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
+               REG_WRITE(hsync_reg,
+                       (adjusted_mode->crtc_hsync_start - offsetX - 1) |
+                       ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
+               REG_WRITE(vblank_reg,
+                       (adjusted_mode->crtc_vblank_start - offsetY - 1) |
+                       ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
+               REG_WRITE(vsync_reg,
+                       (adjusted_mode->crtc_vsync_start - offsetY - 1) |
+                       ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+       } else {
+               REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+                       ((adjusted_mode->crtc_htotal - 1) << 16));
+               REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+                       ((adjusted_mode->crtc_vtotal - 1) << 16));
+               REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+                       ((adjusted_mode->crtc_hblank_end - 1) << 16));
+               REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+                       ((adjusted_mode->crtc_hsync_end - 1) << 16));
+               REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+                       ((adjusted_mode->crtc_vblank_end - 1) << 16));
+               REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+                       ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       }
+
+       /* Flush the plane changes */
+       {
+               struct drm_crtc_helper_funcs *crtc_funcs =
+                   crtc->helper_private;
+               crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+       }
+
+       /* setup pipeconf */
+       pipeconf = REG_READ(pipeconf_reg);
+
+       /* Set up the display plane register */
+       dspcntr = REG_READ(dspcntr_reg);
+       dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+       if (pipe == 0)
+               dspcntr |= DISPPLANE_SEL_PIPE_A;
+       else
+               dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+       dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
+       dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
+
+       if (is_mipi)
+               goto oaktrail_crtc_mode_set_exit;
+
+       refclk = dev_priv->core_freq * 1000;
+
+       dpll = 0;               /*BIT16 = 0 for 100MHz reference */
+
+       ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
+
+       if (!ok) {
+               dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
+       } else {
+               dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d,"
+                        " m = %x, p1 = %x.\n", clock.dot, clock.m,
+                        clock.p1);
+       }
+
+       fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
+
+       dpll |= DPLL_VGA_MODE_DIS;
+
+       dpll |= DPLL_VCO_ENABLE;
+
+       if (is_lvds)
+               dpll |= DPLLA_MODE_LVDS;
+       else
+               dpll |= DPLLB_MODE_DAC_SERIAL;
+
+       if (is_sdvo) {
+               int sdvo_pixel_multiply =
+                   adjusted_mode->clock / mode->clock;
+
+               dpll |= DPLL_DVO_HIGH_SPEED;
+               dpll |= (sdvo_pixel_multiply - 1)
+                                       << SDVO_MULTIPLIER_SHIFT_HIRES;
+       }
+
+       /* compute bitmask from p1 value */
+       dpll |= (1 << (clock.p1 - 2)) << 17;
+
+       dpll |= DPLL_VCO_ENABLE;
+
+       mrstPrintPll("chosen", &clock);
+
+       if (dpll & DPLL_VCO_ENABLE) {
+               REG_WRITE(fp_reg, fp);
+               REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+               REG_READ(dpll_reg);
+               /* Check the DPLLA lock bit PIPEACONF[29] */
+               udelay(150);
+       }
+
+       REG_WRITE(fp_reg, fp);
+       REG_WRITE(dpll_reg, dpll);
+       REG_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150);
+
+       /* write it again -- the BIOS does, after all */
+       REG_WRITE(dpll_reg, dpll);
+       REG_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150);
+
+       REG_WRITE(pipeconf_reg, pipeconf);
+       REG_READ(pipeconf_reg);
+       psb_intel_wait_for_vblank(dev);
+
+       REG_WRITE(dspcntr_reg, dspcntr);
+       psb_intel_wait_for_vblank(dev);
+
+oaktrail_crtc_mode_set_exit:
+       gma_power_end(dev);
+       return 0;
+}
+
+static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+int oaktrail_pipe_set_base(struct drm_crtc *crtc,
+                           int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+       int pipe = psb_intel_crtc->pipe;
+       unsigned long start, offset;
+
+       int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
+       int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+       int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       u32 dspcntr;
+       int ret = 0;
+
+       /* no fb bound */
+       if (!crtc->fb) {
+               dev_dbg(dev->dev, "No FB bound\n");
+               return 0;
+       }
+
+       if (!gma_power_begin(dev, true))
+               return 0;
+
+       start = psbfb->gtt->offset;
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
+
+       dspcntr = REG_READ(dspcntr_reg);
+       dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+       switch (crtc->fb->bits_per_pixel) {
+       case 8:
+               dspcntr |= DISPPLANE_8BPP;
+               break;
+       case 16:
+               if (crtc->fb->depth == 15)
+                       dspcntr |= DISPPLANE_15_16BPP;
+               else
+                       dspcntr |= DISPPLANE_16BPP;
+               break;
+       case 24:
+       case 32:
+               dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+               break;
+       default:
+               dev_err(dev->dev, "Unknown color depth\n");
+               ret = -EINVAL;
+               goto pipe_set_base_exit;
+       }
+       REG_WRITE(dspcntr_reg, dspcntr);
+
+       REG_WRITE(dspbase, offset);
+       REG_READ(dspbase);
+       REG_WRITE(dspsurf, start);
+       REG_READ(dspsurf);
+
+pipe_set_base_exit:
+       gma_power_end(dev);
+       return ret;
+}
+
+static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void oaktrail_crtc_commit(struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
+       .dpms = oaktrail_crtc_dpms,
+       .mode_fixup = oaktrail_crtc_mode_fixup,
+       .mode_set = oaktrail_crtc_mode_set,
+       .mode_set_base = oaktrail_pipe_set_base,
+       .prepare = oaktrail_crtc_prepare,
+       .commit = oaktrail_crtc_commit,
+};
+
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
new file mode 100644 (file)
index 0000000..63aea2f
--- /dev/null
@@ -0,0 +1,512 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <asm/mrst.h>
+#include <asm/intel_scu_ipc.h>
+#include "mid_bios.h"
+#include "intel_bios.h"
+
+static int oaktrail_output_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       if (dev_priv->iLVDS_enable)
+               oaktrail_lvds_init(dev, &dev_priv->mode_dev);
+       else
+               dev_err(dev->dev, "DSI is not supported\n");
+       if (dev_priv->hdmi_priv)
+               oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
+       return 0;
+}
+
+/*
+ *     Provide the low level interfaces for the Moorestown backlight
+ */
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+#define MRST_BLC_MAX_PWM_REG_FREQ          0xFFFF
+#define BLC_PWM_PRECISION_FACTOR 100   /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+#define BLC_ADJUSTMENT_MAX 100
+
+static struct backlight_device *oaktrail_backlight_device;
+static int oaktrail_brightness;
+
+static int oaktrail_set_brightness(struct backlight_device *bd)
+{
+       struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int level = bd->props.brightness;
+       u32 blc_pwm_ctl;
+       u32 max_pwm_blc;
+
+       /* Percentage 1-100% being valid */
+       if (level < 1)
+               level = 1;
+
+       if (gma_power_begin(dev, 0)) {
+               /* Calculate and set the brightness value */
+               max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
+               blc_pwm_ctl = level * max_pwm_blc / 100;
+
+               /* Adjust the backlight level with the percent in
+                * dev_priv->blc_adj1;
+                */
+               blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
+               blc_pwm_ctl = blc_pwm_ctl / 100;
+
+               /* Adjust the backlight level with the percent in
+                * dev_priv->blc_adj2;
+                */
+               blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
+               blc_pwm_ctl = blc_pwm_ctl / 100;
+
+               /* force PWM bit on */
+               REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+               REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
+               gma_power_end(dev);
+       }
+       oaktrail_brightness = level;
+       return 0;
+}
+
+static int oaktrail_get_brightness(struct backlight_device *bd)
+{
+       /* return locally cached var instead of HW read (due to DPST etc.) */
+       /* FIXME: ideally return actual value in case firmware fiddled with
+          it */
+       return oaktrail_brightness;
+}
+
+static int device_backlight_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long core_clock;
+       u16 bl_max_freq;
+       uint32_t value;
+       uint32_t blc_pwm_precision_factor;
+
+       dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
+       dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
+       bl_max_freq = 256;
+       /* this needs to be set elsewhere */
+       blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
+
+       core_clock = dev_priv->core_freq;
+
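+       /*
+        * Derive the backlight PWM modulation divider from the core display
+        * clock and the target PWM frequency; the result is programmed into
+        * both halves of BLC_PWM_CTL.
+        */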
+       value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+       value *= blc_pwm_precision_factor;
+       value /= bl_max_freq;
+       value /= blc_pwm_precision_factor;
+
+       if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
+                       return -ERANGE;
+
+       if (gma_power_begin(dev, false)) {
+               REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+               REG_WRITE(BLC_PWM_CTL, value | (value << 16));
+               gma_power_end(dev);
+       }
+       return 0;
+}
+
+static const struct backlight_ops oaktrail_ops = {
+       .get_brightness = oaktrail_get_brightness,
+       .update_status  = oaktrail_set_brightness,
+};
+
+int oaktrail_backlight_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret;
+       struct backlight_properties props;
+
+       memset(&props, 0, sizeof(struct backlight_properties));
+       props.max_brightness = 100;
+       props.type = BACKLIGHT_PLATFORM;
+
+       oaktrail_backlight_device = backlight_device_register("oaktrail-bl",
+                               NULL, (void *)dev, &oaktrail_ops, &props);
+
+       if (IS_ERR(oaktrail_backlight_device))
+               return PTR_ERR(oaktrail_backlight_device);
+
+       ret = device_backlight_init(dev);
+       if (ret < 0) {
+               backlight_device_unregister(oaktrail_backlight_device);
+               return ret;
+       }
+       oaktrail_backlight_device->props.brightness = 100;
+       oaktrail_backlight_device->props.max_brightness = 100;
+       backlight_update_status(oaktrail_backlight_device);
+       dev_priv->backlight_device = oaktrail_backlight_device;
+       return 0;
+}
+
+#endif
+
+/*
+ *     Provide the Moorestown specific chip logic and low level methods
+ *     for power management
+ */
+
+static void oaktrail_init_pm(struct drm_device *dev)
+{
+}
+
+/**
+ *     oaktrail_save_display_registers -       save registers lost on suspend
+ *     @dev: our DRM device
+ *
+ *     Save the state we need in order to be able to restore the interface
+ *     upon resume from suspend
+ */
+static int oaktrail_save_display_registers(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int i;
+       u32 pp_stat;
+
+       /* Display arbitration control + watermarks */
+       dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
+       dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+       dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+       dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+       dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+       dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+       dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+       dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+       /* Pipe & plane A info */
+       dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
+       dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
+       dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
+       dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
+       dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
+       dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
+       dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
+       dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
+       dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
+       dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
+       dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
+       dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
+       dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
+       dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
+       dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
+       dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
+       dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
+       dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
+
+       /* Save cursor regs */
+       dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
+       dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
+       dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
+
+       /* Save palette (gamma) */
+       for (i = 0; i < 256; i++)
+               dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+
+       if (dev_priv->hdmi_priv)
+               oaktrail_hdmi_save(dev);
+
+       /* Save performance state */
+       dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
+
+       /* LVDS state */
+       dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
+       dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+       dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
+       dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
+       dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
+       dev_priv->saveLVDS = PSB_RVDC32(LVDS);
+       dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+       dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
+       dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
+       dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
+
+       /* HW overlay */
+       dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
+       dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+       dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+       dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+       dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+       dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+       dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+
+       /* DPST registers */
+       dev_priv->saveHISTOGRAM_INT_CONTROL_REG =
+                                       PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+       dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG =
+                                       PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+       dev_priv->savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+       if (dev_priv->iLVDS_enable) {
+               /* Shut down the panel */
+               PSB_WVDC32(0, PP_CONTROL);
+
+               do {
+                       pp_stat = PSB_RVDC32(PP_STATUS);
+               } while (pp_stat & 0x80000000);
+
+               /* Turn off the plane */
+               PSB_WVDC32(0x58000000, DSPACNTR);
+               /* Trigger the plane disable */
+               PSB_WVDC32(0, DSPASURF);
+
+               /* Wait ~4 ticks */
+               msleep(4);
+
+               /* Turn off pipe */
+               PSB_WVDC32(0x0, PIPEACONF);
+               /* Wait ~8 ticks */
+               msleep(8);
+
+               /* Turn off PLLs */
+               PSB_WVDC32(0, MRST_DPLL_A);
+       }
+       return 0;
+}
+
+/**
+ *     oaktrail_restore_display_registers      -       restore lost register state
+ *     @dev: our DRM device
+ *
+ *     Restore register state that was lost during suspend and resume.
+ */
+static int oaktrail_restore_display_registers(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pp_stat;
+       int i;
+
+       /* Display arbitration + watermarks */
+       PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
+       PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
+       PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
+       PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
+       PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
+       PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
+       PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
+       PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+
+       /* Make sure VGA plane is off. It initializes to on after reset! */
+       PSB_WVDC32(0x80000000, VGACNTRL);
+
+       /* set the plls */
+       PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
+       PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
+
+       /* Actually enable it */
+       PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
+       DRM_UDELAY(150);
+
+       /* Restore mode */
+       PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
+       PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
+       PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
+       PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
+       PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
+       PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
+       PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
+       PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
+
+       /* Restore performance mode*/
+       PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
+
+       /* Enable the pipe*/
+       if (dev_priv->iLVDS_enable)
+               PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
+
+       /* Set up the plane*/
+       PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
+       PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
+       PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
+
+       /* Enable the plane */
+       PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
+       PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
+
+       /* Enable Cursor A */
+       PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
+       PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
+       PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
+
+       /* Restore palette (gamma) */
+       for (i = 0; i < 256; i++)
+               PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i << 2));
+
+       if (dev_priv->hdmi_priv)
+               oaktrail_hdmi_restore(dev);
+
+       if (dev_priv->iLVDS_enable) {
+               PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
+               PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
+               PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
+               PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+               PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
+               PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
+               PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
+               PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
+               PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
+               PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
+       }
+
+       /* Wait for cycle delay */
+       do {
+               pp_stat = PSB_RVDC32(PP_STATUS);
+       } while (pp_stat & 0x08000000);
+
+       /* Wait for panel power up */
+       do {
+               pp_stat = PSB_RVDC32(PP_STATUS);
+       } while (pp_stat & 0x10000000);
+
+       /* Restore HW overlay */
+       PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
+       PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
+       PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
+       PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
+       PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
+       PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
+       PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
+
+       /* DPST registers */
+       PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG,
+                                               HISTOGRAM_INT_CONTROL);
+       PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG,
+                                               HISTOGRAM_LOGIC_CONTROL);
+       PSB_WVDC32(dev_priv->savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
+
+       return 0;
+}
+
+/**
+ *     oaktrail_power_down     -       power down the display island
+ *     @dev: our DRM device
+ *
+ *     Power down the display interface of our device
+ */
+static int oaktrail_power_down(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pwr_mask;
+       u32 pwr_sts;
+
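+       /*
+        * Write the display power-gate mask to PSB_PM_SSC, then poll
+        * PSB_PM_SSS until the hardware reports the display islands as
+        * gated (all mask bits set).
+        */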
+       pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+       outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);
+
+       while (true) {
+               pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+               if ((pwr_sts & pwr_mask) == pwr_mask)
+                       break;
+               else
+                       udelay(10);
+       }
+       return 0;
+}
+
+/**
+ *     oaktrail_power_up       -       power up the display island
+ *     @dev: our DRM device
+ *
+ *     Restore power to the specified island(s) (powergating)
+ */
+static int oaktrail_power_up(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+       u32 pwr_sts, pwr_cnt;
+
+       pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+       pwr_cnt &= ~pwr_mask;
+       outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
+
+       while (true) {
+               pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+               if ((pwr_sts & pwr_mask) == 0)
+                       break;
+               else
+                       udelay(10);
+       }
+       return 0;
+}
+
+
+static int oaktrail_chip_setup(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+       int ret;
+
+       ret = mid_chip_setup(dev);
+       if (ret < 0)
+               return ret;
+       if (vbt->size == 0) {
+               /* Now pull the BIOS data */
+               gma_intel_opregion_init(dev);
+               psb_intel_init_bios(dev);
+       }
+       return 0;
+}
+
+static void oaktrail_teardown(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+
+       oaktrail_hdmi_teardown(dev);
+       if (vbt->size == 0)
+               psb_intel_destroy_bios(dev);
+}
+
+const struct psb_ops oaktrail_chip_ops = {
+       .name = "Oaktrail",
+       .accel_2d = 1,
+       .pipes = 2,
+       .crtcs = 2,
+       .sgx_offset = MRST_SGX_OFFSET,
+
+       .chip_setup = oaktrail_chip_setup,
+       .chip_teardown = oaktrail_teardown,
+       .crtc_helper = &oaktrail_helper_funcs,
+       .crtc_funcs = &psb_intel_crtc_funcs,
+
+       .output_init = oaktrail_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       .backlight_init = oaktrail_backlight_init,
+#endif
+
+       .init_pm = oaktrail_init_pm,
+       .save_regs = oaktrail_save_display_registers,
+       .restore_regs = oaktrail_restore_display_registers,
+       .power_down = oaktrail_power_down,
+       .power_up = oaktrail_power_up,
+
+       .i2c_bus = 1,
+};
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
new file mode 100644 (file)
index 0000000..36878a6
--- /dev/null
@@ -0,0 +1,859 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Li Peng <peng.li@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_drv.h"
+
+#define HDMI_READ(reg)         readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val)   writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR       0x1000
+#define HCR_ENABLE_HDCP                (1 << 5)
+#define HCR_ENABLE_AUDIO       (1 << 2)
+#define HCR_ENABLE_PIXEL       (1 << 1)
+#define HCR_ENABLE_TMDS                (1 << 0)
+
+#define HDMI_HICR      0x1004
+#define HDMI_HSR       0x1008
+#define HDMI_HISR      0x100C
+#define HDMI_DETECT_HDP                (1 << 0)
+
+#define HDMI_VIDEO_REG 0x3000
+#define HDMI_UNIT_EN           (1 << 7)
+#define HDMI_MODE_OUTPUT       (1 << 0)
+#define HDMI_HBLANK_A  0x3100
+
+#define HDMI_AUDIO_CTRL        0x4000
+#define HDMI_ENABLE_AUDIO      (1 << 0)
+
+#define PCH_HTOTAL_B   0x3100
+#define PCH_HBLANK_B   0x3104
+#define PCH_HSYNC_B    0x3108
+#define PCH_VTOTAL_B   0x310C
+#define PCH_VBLANK_B   0x3110
+#define PCH_VSYNC_B    0x3114
+#define PCH_PIPEBSRC   0x311C
+
+#define PCH_PIPEB_DSL  0x3800
+#define PCH_PIPEB_SLC  0x3804
+#define PCH_PIPEBCONF  0x3808
+#define PCH_PIPEBSTAT  0x3824
+
+#define CDVO_DFT       0x5000
+#define CDVO_SLEWRATE  0x5004
+#define CDVO_STRENGTH  0x5008
+#define CDVO_RCOMP     0x500C
+
+#define DPLL_CTRL       0x6000
+#define DPLL_PDIV_SHIFT                16
+#define DPLL_PDIV_MASK         (0xf << 16)
+#define DPLL_PWRDN             (1 << 4)
+#define DPLL_RESET             (1 << 3)
+#define DPLL_FASTEN            (1 << 2)
+#define DPLL_ENSTAT            (1 << 1)
+#define DPLL_DITHEN            (1 << 0)
+
+#define DPLL_DIV_CTRL   0x6004
+#define DPLL_CLKF_MASK         0xffffffc0
+#define DPLL_CLKR_MASK         (0x3f)
+
+#define DPLL_CLK_ENABLE 0x6008
+#define DPLL_EN_DISP           (1 << 31)
+#define DPLL_SEL_HDMI          (1 << 8)
+#define DPLL_EN_HDMI           (1 << 1)
+#define DPLL_EN_VGA            (1 << 0)
+
+#define DPLL_ADJUST     0x600C
+#define DPLL_STATUS     0x6010
+#define DPLL_UPDATE     0x6014
+#define DPLL_DFT        0x6020
+
+struct intel_range {
+       int     min, max;
+};
+
+struct oaktrail_hdmi_limit {
+       struct intel_range vco, np, nr, nf;
+};
+
+struct oaktrail_hdmi_clock {
+       int np;
+       int nr;
+       int nf;
+       int dot;
+};
+
+#define VCO_MIN                320000
+#define VCO_MAX                1650000
+#define        NP_MIN          1
+#define        NP_MAX          15
+#define        NR_MIN          1
+#define        NR_MAX          64
+#define NF_MIN         2
+#define NF_MAX         4095
+
+static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
+       .vco = { .min = VCO_MIN,                .max = VCO_MAX },
+       .np  = { .min = NP_MIN,                 .max = NP_MAX  },
+       .nr  = { .min = NR_MIN,                 .max = NR_MAX  },
+       .nf  = { .min = NF_MIN,                 .max = NF_MAX  },
+};
+
+static void wait_for_vblank(struct drm_device *dev)
+{
+       /* FIXME: Can we do this as a sleep ? */
+       /* Wait for 20ms, i.e. one cycle at 50 Hz. */
+       mdelay(20);
+}
+
+static void scu_busy_loop(void *scu_base)
+{
+       u32 status = 0;
+       u32 loop_count = 0;
+
+       status = readl(scu_base + 0x04);
+       while (status & 1) {
+               udelay(1); /* SCU processing time is a few microseconds */
+               status = readl(scu_base + 0x04);
+               loop_count++;
+               /* break if the SCU doesn't clear the busy bit after many retries */
+               if (loop_count > 1000) {
+                       DRM_DEBUG_KMS("SCU IPC timed out");
+                       return;
+               }
+       }
+}
+
+static void oaktrail_hdmi_reset(struct drm_device *dev)
+{
+       void *base;
+       /* FIXME: at least make these defines */
+       unsigned int scu_ipc_mmio = 0xff11c000;
+       int scu_len = 1024;
+
+       base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+       if (base == NULL) {
+               DRM_ERROR("failed to map SCU mmio\n");
+               return;
+       }
+
+       /* scu ipc: assert hdmi controller reset */
+       writel(0xff11d118, base + 0x0c);
+       writel(0x7fffffdf, base + 0x80);
+       writel(0x42005, base + 0x0);
+       scu_busy_loop(base);
+
+       /* scu ipc: de-assert hdmi controller reset */
+       writel(0xff11d118, base + 0x0c);
+       writel(0x7fffffff, base + 0x80);
+       writel(0x42005, base + 0x0);
+       scu_busy_loop(base);
+
+       iounmap(base);
+}
+
+static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+       HDMI_WRITE(HDMI_HCR, 0x67);
+       HDMI_READ(HDMI_HCR);
+
+       HDMI_WRITE(0x51a8, 0x10);
+       HDMI_READ(0x51a8);
+
+       HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1);
+       HDMI_READ(HDMI_AUDIO_CTRL);
+}
+
+static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+       HDMI_WRITE(0x51a8, 0x0);
+       HDMI_READ(0x51a8);
+
+       HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0);
+       HDMI_READ(HDMI_AUDIO_CTRL);
+
+       HDMI_WRITE(HDMI_HCR, 0x47);
+       HDMI_READ(HDMI_HCR);
+}
+
+void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       u32 temp;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_OFF:
+               /* Disable VGACNTRL */
+               REG_WRITE(VGACNTRL, 0x80000000);
+
+               /* Disable plane */
+               temp = REG_READ(DSPBCNTR);
+               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+                       REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+                       REG_READ(DSPBCNTR);
+                       /* Flush the plane changes */
+                       REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+                       REG_READ(DSPBSURF);
+               }
+
+               /* Disable pipe B */
+               temp = REG_READ(PIPEBCONF);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+                       REG_READ(PIPEBCONF);
+               }
+
+               /* Disable LNW Pipes, etc */
+               temp = REG_READ(PCH_PIPEBCONF);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+                       REG_READ(PCH_PIPEBCONF);
+               }
+               /* wait for pipe off */
+               udelay(150);
+               /* Disable dpll */
+               temp = REG_READ(DPLL_CTRL);
+               if ((temp & DPLL_PWRDN) == 0) {
+                       REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+                       REG_WRITE(DPLL_STATUS, 0x1);
+               }
+               /* wait for dpll off */
+               udelay(150);
+               break;
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+               /* Enable dpll */
+               temp = REG_READ(DPLL_CTRL);
+               if ((temp & DPLL_PWRDN) != 0) {
+                       REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+                       temp = REG_READ(DPLL_CLK_ENABLE);
+                       REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+                       REG_READ(DPLL_CLK_ENABLE);
+               }
+               /* wait for dpll warm up */
+               udelay(150);
+
+               /* Enable pipe B */
+               temp = REG_READ(PIPEBCONF);
+               if ((temp & PIPEACONF_ENABLE) == 0) {
+                       REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+                       REG_READ(PIPEBCONF);
+               }
+
+               /* Enable LNW Pipe B */
+               temp = REG_READ(PCH_PIPEBCONF);
+               if ((temp & PIPEACONF_ENABLE) == 0) {
+                       REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+                       REG_READ(PCH_PIPEBCONF);
+               }
+               wait_for_vblank(dev);
+
+               /* Enable plane */
+               temp = REG_READ(DSPBCNTR);
+               if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+                       REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+                       REG_READ(DSPBSURF);
+               }
+               psb_intel_crtc_load_lut(crtc);
+       }
+       /* DSPARB */
+       REG_WRITE(DSPARB, 0x00003fbf);
+       /* FW1 */
+       REG_WRITE(0x70034, 0x3f880a0a);
+       /* FW2 */
+       REG_WRITE(0x70038, 0x0b060808);
+       /* FW4 */
+       REG_WRITE(0x70050, 0x08030404);
+       /* FW5 */
+       REG_WRITE(0x70054, 0x04040404);
+       /* LNC Chicken Bits */
+       REG_WRITE(0x70400, 0x4000);
+}
+
+
+static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+       static int dpms_mode = -1;
+
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       u32 temp;
+
+       if (dpms_mode == mode)
+               return;
+
+       if (mode != DRM_MODE_DPMS_ON)
+               temp = 0x0;
+       else
+               temp = 0x99;
+
+       dpms_mode = mode;
+       HDMI_WRITE(HDMI_VIDEO_REG, temp);
+}
+
+static unsigned int htotal_calculate(struct drm_display_mode *mode)
+{
+       u32 htotal, new_crtc_htotal;
+
+       htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+
+       /*
+        * 1024 x 768  new_crtc_htotal = 0x1024;
+        * 1280 x 1024 new_crtc_htotal = 0x0c34;
+        */
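+       /*
+        * Worked example (assuming the standard VESA 1024x768@60 timing,
+        * crtc_htotal = 1344 and clock = 65000 kHz):
+        * (1344 - 1) * 200 * 1000 / 65000 = 4132 = 0x1024, which matches
+        * the value quoted above.
+        */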
+       new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+
+       return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+}
+
+static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+                               int refclk, struct oaktrail_hdmi_clock *best_clock)
+{
+       int np_min, np_max, nr_min, nr_max;
+       int np, nr, nf;
+
+       np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
+       np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
+       if (np_min < oaktrail_hdmi_limit.np.min)
+               np_min = oaktrail_hdmi_limit.np.min;
+       if (np_max > oaktrail_hdmi_limit.np.max)
+               np_max = oaktrail_hdmi_limit.np.max;
+
+       nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+       nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+       if (nr_min < oaktrail_hdmi_limit.nr.min)
+               nr_min = oaktrail_hdmi_limit.nr.min;
+       if (nr_max > oaktrail_hdmi_limit.nr.max)
+               nr_max = oaktrail_hdmi_limit.nr.max;
+
+       np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+       nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+       nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+       DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+
+       /*
+        * 1024 x 768  np = 1; nr = 0x26; nf = 0x0fd8000;
+        * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+        */
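+       /*
+        * Worked example (assuming refclk = 25000 and the standard 65 MHz
+        * 1024x768 dot clock, i.e. target = 65000): np_max = 2 and
+        * nr_max = 39, giving np = 1, nr = 39 and
+        * nf = round(650000 * 39 / 25000) = 1014; the stored values are
+        * then nr - 1 = 0x26 and 1014 << 14 = 0xfd8000, matching the
+        * figures above.
+        */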
+       best_clock->np = np;
+       best_clock->nr = nr - 1;
+       best_clock->nf = (nf << 14);
+}
+
+int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+                           struct drm_display_mode *mode,
+                           struct drm_display_mode *adjusted_mode,
+                           int x, int y,
+                           struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       int pipe = 1;
+       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+       int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+       int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       int refclk;
+       struct oaktrail_hdmi_clock clock;
+       u32 dspcntr, pipeconf, dpll, temp;
+       int dspcntr_reg = DSPBCNTR;
+
+       /* Disable the VGA plane that we never use */
+       REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+       /* XXX: Disable the panel fitter if it was on our pipe */
+
+       /* Disable dpll if necessary */
+       dpll = REG_READ(DPLL_CTRL);
+       if ((dpll & DPLL_PWRDN) == 0) {
+               REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+               REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+               REG_WRITE(DPLL_STATUS, 0x1);
+       }
+       udelay(150);
+
+       /* reset controller: FIXME - can we sort out the ioremap mess ? */
+       iounmap(hdmi_dev->regs);
+       oaktrail_hdmi_reset(dev);
+
+       /* program and enable dpll */
+       refclk = 25000;
+       oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+
+       /* Setting DPLL */
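+       /*
+        * Note: clock.nf was stored pre-shifted by 14 bits in
+        * oaktrail_hdmi_find_dpll(), so DPLL_ADJUST below takes the
+        * unshifted feedback divider minus one.
+        */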
+       dpll = REG_READ(DPLL_CTRL);
+       dpll &= ~DPLL_PDIV_MASK;
+       dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+       REG_WRITE(DPLL_CTRL, 0x00000008);
+       REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+       REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+       REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+       REG_WRITE(DPLL_UPDATE, 0x80000000);
+       REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+       udelay(150);
+
+       hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+       if (hdmi_dev->regs == NULL) {
+               DRM_ERROR("failed to do hdmi mmio mapping\n");
+               return -ENOMEM;
+       }
+
+       /* configure HDMI */
+       HDMI_WRITE(0x1004, 0x1fd);
+       HDMI_WRITE(0x2000, 0x1);
+       HDMI_WRITE(0x2008, 0x0);
+       HDMI_WRITE(0x3130, 0x8);
+       HDMI_WRITE(0x101c, 0x1800810);
+
+       temp = htotal_calculate(adjusted_mode);
+       REG_WRITE(htot_reg, temp);
+       REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+       REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+       REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+       REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       REG_WRITE(pipesrc_reg,
+               ((mode->crtc_hdisplay - 1) << 16) |  (mode->crtc_vdisplay - 1));
+
+       REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+       REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+       REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+       REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+       REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       REG_WRITE(PCH_PIPEBSRC,
+               ((mode->crtc_hdisplay - 1) << 16) |  (mode->crtc_vdisplay - 1));
+
+       temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+       HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) |  temp);
+
+       REG_WRITE(dspsize_reg,
+                       ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+       REG_WRITE(dsppos_reg, 0);
+
+       /* Flush the plane changes */
+       {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+               crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+       }
+
+       /* Set up the display plane register */
+       dspcntr = REG_READ(dspcntr_reg);
+       dspcntr |= DISPPLANE_GAMMA_ENABLE;
+       dspcntr |= DISPPLANE_SEL_PIPE_B;
+       dspcntr |= DISPLAY_PLANE_ENABLE;
+
+       /* setup pipeconf */
+       pipeconf = REG_READ(pipeconf_reg);
+       pipeconf |= PIPEACONF_ENABLE;
+
+       REG_WRITE(pipeconf_reg, pipeconf);
+       REG_READ(pipeconf_reg);
+
+       REG_WRITE(PCH_PIPEBCONF, pipeconf);
+       REG_READ(PCH_PIPEBCONF);
+       wait_for_vblank(dev);
+
+       REG_WRITE(dspcntr_reg, dspcntr);
+       wait_for_vblank(dev);
+
+       return 0;
+}
+
+static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
+                               struct drm_display_mode *mode)
+{
+       if (mode->clock > 165000)
+               return MODE_CLOCK_HIGH;
+       if (mode->clock < 20000)
+               return MODE_CLOCK_LOW;
+
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       return MODE_OK;
+}
+
+static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
+                                struct drm_display_mode *mode,
+                                struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static enum drm_connector_status
+oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
+{
+       enum drm_connector_status status;
+       struct drm_device *dev = connector->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       u32 temp;
+
+       temp = HDMI_READ(HDMI_HSR);
+       DRM_DEBUG_KMS("HDMI_HSR %x\n", temp);
+
+       if ((temp & HDMI_DETECT_HDP) != 0)
+               status = connector_status_connected;
+       else
+               status = connector_status_disconnected;
+
+       return status;
+}
+
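+/*
+ * Hard-coded EDID block used in place of a live DDC read (see the FIXME
+ * in oaktrail_hdmi_get_modes()); the descriptor strings in the block
+ * identify a Dell 2709W panel.
+ */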
+static const unsigned char raw_edid[] = {
+       0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0,
+       0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78,
+       0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5,
+       0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01,
+       0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+       0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a,
+       0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35,
+       0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44,
+       0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20,
+       0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
+       0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
+};
+
+static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct i2c_adapter *i2c_adap;
+       struct edid *edid;
+       struct drm_display_mode *mode, *t;
+       int i = 0, ret = 0;
+
+       i2c_adap = i2c_get_adapter(3);
+       if (i2c_adap == NULL) {
+               DRM_ERROR("No ddc adapter available!\n");
+               edid = (struct edid *)raw_edid;
+       } else {
+               edid = (struct edid *)raw_edid;
+               /* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
+       }
+
+       if (edid) {
+               drm_mode_connector_update_edid_property(connector, edid);
+               ret = drm_add_edid_modes(connector, edid);
+               connector->display_info.raw_edid = NULL;
+       }
+
+       /*
+        * prune modes that require frame buffer bigger than stolen mem
+        */
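+       /*
+        * For example, a 1920x1080 mode at 32bpp needs 1920 * 1080 * 4
+        * bytes (roughly 8 MB) of framebuffer, so it is dropped when the
+        * stolen memory area is not larger than that.
+        */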
+       list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+               if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
+                       i++;
+                       drm_mode_remove(connector, mode);
+               }
+       }
+       return ret - i;
+}
+
+static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+
+       oaktrail_hdmi_audio_enable(dev);
+       return;
+}
+
+static void oaktrail_hdmi_destroy(struct drm_connector *connector)
+{
+       return;
+}
+
+static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
+       .dpms = oaktrail_hdmi_dpms,
+       .mode_fixup = oaktrail_hdmi_mode_fixup,
+       .prepare = psb_intel_encoder_prepare,
+       .mode_set = oaktrail_hdmi_mode_set,
+       .commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs
+                                       oaktrail_hdmi_connector_helper_funcs = {
+       .get_modes = oaktrail_hdmi_get_modes,
+       .mode_valid = oaktrail_hdmi_mode_valid,
+       .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = oaktrail_hdmi_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = oaktrail_hdmi_destroy,
+};
+
+static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
+       .destroy = oaktrail_hdmi_enc_destroy,
+};
+
+void oaktrail_hdmi_init(struct drm_device *dev,
+                                       struct psb_intel_mode_device *mode_dev)
+{
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct psb_intel_connector *psb_intel_connector;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+
+       psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+       if (!psb_intel_encoder)
+               return;
+
+       psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+       if (!psb_intel_connector)
+               goto failed_connector;
+
+       connector = &psb_intel_connector->base;
+       encoder = &psb_intel_encoder->base;
+       drm_connector_init(dev, connector,
+                          &oaktrail_hdmi_connector_funcs,
+                          DRM_MODE_CONNECTOR_DVID);
+
+       drm_encoder_init(dev, encoder,
+                        &oaktrail_hdmi_enc_funcs,
+                        DRM_MODE_ENCODER_TMDS);
+
+       psb_intel_connector_attach_encoder(psb_intel_connector,
+                                          psb_intel_encoder);
+
+       psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+       drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
+       drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
+
+       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+       drm_sysfs_connector_add(connector);
+
+       return;
+
+failed_connector:
+       kfree(psb_intel_encoder);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
+       {}
+};
+
+void oaktrail_hdmi_setup(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct pci_dev *pdev;
+       struct oaktrail_hdmi_dev *hdmi_dev;
+       int ret;
+
+       pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
+       if (!pdev)
+               return;
+
+       hdmi_dev = kzalloc(sizeof(struct oaktrail_hdmi_dev), GFP_KERNEL);
+       if (!hdmi_dev) {
+               dev_err(dev->dev, "failed to allocate memory\n");
+               goto out;
+       }
+
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               dev_err(dev->dev, "failed to enable hdmi controller\n");
+               goto free;
+       }
+
+       hdmi_dev->mmio = pci_resource_start(pdev, 0);
+       hdmi_dev->mmio_len = pci_resource_len(pdev, 0);
+       hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+       if (!hdmi_dev->regs) {
+               dev_err(dev->dev, "failed to map hdmi mmio\n");
+               goto free;
+       }
+
+       hdmi_dev->dev = pdev;
+       pci_set_drvdata(pdev, hdmi_dev);
+
+       /* Initialize i2c controller */
+       ret = oaktrail_hdmi_i2c_init(hdmi_dev->dev);
+       if (ret)
+               dev_err(dev->dev, "HDMI I2C initialization failed\n");
+
+       dev_priv->hdmi_priv = hdmi_dev;
+       oaktrail_hdmi_audio_disable(dev);
+       return;
+
+free:
+       kfree(hdmi_dev);
+out:
+       return;
+}
+
+void oaktrail_hdmi_teardown(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       struct pci_dev *pdev;
+
+       if (hdmi_dev) {
+               pdev = hdmi_dev->dev;
+               pci_set_drvdata(pdev, NULL);
+               oaktrail_hdmi_i2c_exit(pdev);
+               iounmap(hdmi_dev->regs);
+               kfree(hdmi_dev);
+               pci_dev_put(pdev);
+       }
+}
+
+/* save HDMI register state */
+void oaktrail_hdmi_save(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       int i;
+
+       /* dpll */
+       hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL);
+       hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL);
+       hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST);
+       hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE);
+       hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
+
+       /* pipe B */
+       dev_priv->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
+       dev_priv->savePIPEBSRC  = PSB_RVDC32(PIPEBSRC);
+       dev_priv->saveHTOTAL_B  = PSB_RVDC32(HTOTAL_B);
+       dev_priv->saveHBLANK_B  = PSB_RVDC32(HBLANK_B);
+       dev_priv->saveHSYNC_B   = PSB_RVDC32(HSYNC_B);
+       dev_priv->saveVTOTAL_B  = PSB_RVDC32(VTOTAL_B);
+       dev_priv->saveVBLANK_B  = PSB_RVDC32(VBLANK_B);
+       dev_priv->saveVSYNC_B   = PSB_RVDC32(VSYNC_B);
+
+       hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
+       hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
+       hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B);
+       hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B);
+       hdmi_dev->savePCH_HSYNC_B  = PSB_RVDC32(PCH_HSYNC_B);
+       hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B);
+       hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B);
+       hdmi_dev->savePCH_VSYNC_B  = PSB_RVDC32(PCH_VSYNC_B);
+
+       /* plane */
+       dev_priv->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
+       dev_priv->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
+       dev_priv->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
+       dev_priv->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
+       dev_priv->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
+       dev_priv->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
+
+       /* cursor B */
+       dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
+       dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
+       dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
+
+       /* save palette */
+       for (i = 0; i < 256; i++)
+               dev_priv->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+}
+
+/* restore HDMI register state */
+void oaktrail_hdmi_restore(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       int i;
+
+       /* dpll */
+       PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL);
+       PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL);
+       PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
+       PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
+       PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
+       DRM_UDELAY(150);
+
+       /* pipe */
+       PSB_WVDC32(dev_priv->savePIPEBSRC, PIPEBSRC);
+       PSB_WVDC32(dev_priv->saveHTOTAL_B, HTOTAL_B);
+       PSB_WVDC32(dev_priv->saveHBLANK_B, HBLANK_B);
+       PSB_WVDC32(dev_priv->saveHSYNC_B,  HSYNC_B);
+       PSB_WVDC32(dev_priv->saveVTOTAL_B, VTOTAL_B);
+       PSB_WVDC32(dev_priv->saveVBLANK_B, VBLANK_B);
+       PSB_WVDC32(dev_priv->saveVSYNC_B,  VSYNC_B);
+
+       PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
+       PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
+       PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B);
+       PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B,  PCH_HSYNC_B);
+       PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B);
+       PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
+       PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B,  PCH_VSYNC_B);
+
+       PSB_WVDC32(dev_priv->savePIPEBCONF, PIPEBCONF);
+       PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
+
+       /* plane */
+       PSB_WVDC32(dev_priv->saveDSPBLINOFF, DSPBLINOFF);
+       PSB_WVDC32(dev_priv->saveDSPBSTRIDE, DSPBSTRIDE);
+       PSB_WVDC32(dev_priv->saveDSPBTILEOFF, DSPBTILEOFF);
+       PSB_WVDC32(dev_priv->saveDSPBCNTR, DSPBCNTR);
+       PSB_WVDC32(dev_priv->saveDSPBSURF, DSPBSURF);
+
+       /* cursor B */
+       PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
+       PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
+       PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
+
+       /* restore palette */
+       for (i = 0; i < 256; i++)
+               PSB_WVDC32(dev_priv->save_palette_b[i], PALETTE_B + (i << 2));
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
new file mode 100644 (file)
index 0000000..7054408
--- /dev/null
@@ -0,0 +1,328 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Li Peng <peng.li@intel.com>
+ */
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include "psb_drv.h"
+
+#define HDMI_READ(reg)         readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val)   writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR       0x1000
+#define HCR_DETECT_HDP         (1 << 6)
+#define HCR_ENABLE_HDCP                (1 << 5)
+#define HCR_ENABLE_AUDIO       (1 << 2)
+#define HCR_ENABLE_PIXEL       (1 << 1)
+#define HCR_ENABLE_TMDS                (1 << 0)
+#define HDMI_HICR      0x1004
+#define HDMI_INTR_I2C_ERROR    (1 << 4)
+#define HDMI_INTR_I2C_FULL     (1 << 3)
+#define HDMI_INTR_I2C_DONE     (1 << 2)
+#define HDMI_INTR_HPD          (1 << 0)
+#define HDMI_HSR       0x1008
+#define HDMI_HISR      0x100C
+#define HDMI_HI2CRDB0  0x1200
+#define HDMI_HI2CHCR   0x1240
+#define HI2C_HDCP_WRITE                (0 << 2)
+#define HI2C_HDCP_RI_READ      (1 << 2)
+#define HI2C_HDCP_READ         (2 << 2)
+#define HI2C_EDID_READ         (3 << 2)
+#define HI2C_READ_CONTINUE     (1 << 1)
+#define HI2C_ENABLE_TRANSACTION        (1 << 0)
+
+#define HDMI_ICRH      0x1100
+#define HDMI_HI2CTDR0  0x1244
+#define HDMI_HI2CTDR1  0x1248
+
+#define I2C_STAT_INIT          0
+#define I2C_READ_DONE          1
+#define I2C_TRANSACTION_DONE   2
+
+struct hdmi_i2c_dev {
+       struct i2c_adapter *adap;
+       struct mutex i2c_lock;
+       struct completion complete;
+       int status;
+       struct i2c_msg *msg;
+       int buf_offset;
+};
+
+static void hdmi_i2c_irq_enable(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+       u32 temp;
+
+       temp = HDMI_READ(HDMI_HICR);
+       temp |= (HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE);
+       HDMI_WRITE(HDMI_HICR, temp);
+       HDMI_READ(HDMI_HICR);
+}
+
+static void hdmi_i2c_irq_disable(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+       HDMI_WRITE(HDMI_HICR, 0x0);
+       HDMI_READ(HDMI_HICR);
+}
+
+static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+       struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+       struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+       u32 temp;
+
+       i2c_dev->status = I2C_STAT_INIT;
+       i2c_dev->msg = pmsg;
+       i2c_dev->buf_offset = 0;
+       INIT_COMPLETION(i2c_dev->complete);
+
+       /* Enable I2C transaction */
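+       /*
+        * The transfer length is packed into the upper bits of HI2CHCR
+        * (shifted left by 20 here), alongside the EDID-read mode and
+        * transaction-enable bits.
+        */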
+       temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
+       HDMI_WRITE(HDMI_HI2CHCR, temp);
+       HDMI_READ(HDMI_HI2CHCR);
+
+       while (i2c_dev->status != I2C_TRANSACTION_DONE)
+               wait_for_completion_interruptible_timeout(&i2c_dev->complete,
+                                                               10 * HZ);
+
+       return 0;
+}
+
+static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+       /*
+        * XXX: i2c writes don't seem to be needed for the EDID probe, so do nothing
+        */
+       return 0;
+}
+
+static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
+                               struct i2c_msg *pmsg,
+                               int num)
+{
+       struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+       struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+       int i, err = 0;
+
+       mutex_lock(&i2c_dev->i2c_lock);
+
+       /* Enable i2c unit */
+       HDMI_WRITE(HDMI_ICRH, 0x00008760);
+
+       /* Enable irq */
+       hdmi_i2c_irq_enable(hdmi_dev);
+       for (i = 0; i < num; i++) {
+               if (pmsg->len && pmsg->buf) {
+                       if (pmsg->flags & I2C_M_RD)
+                               err = xfer_read(adap, pmsg);
+                       else
+                               err = xfer_write(adap, pmsg);
+               }
+               pmsg++;         /* next message */
+       }
+
+       /* Disable irq */
+       hdmi_i2c_irq_disable(hdmi_dev);
+
+       mutex_unlock(&i2c_dev->i2c_lock);
+
+       return i;
+}
+
+static u32 oaktrail_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm oaktrail_hdmi_i2c_algorithm = {
+       .master_xfer    = oaktrail_hdmi_i2c_access,
+       .functionality  = oaktrail_hdmi_i2c_func,
+};
+
+static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
+       .name           = "oaktrail_hdmi_i2c",
+       .nr             = 3,
+       .owner          = THIS_MODULE,
+       .class          = I2C_CLASS_DDC,
+       .algo           = &oaktrail_hdmi_i2c_algorithm,
+};
+
+static void hdmi_i2c_read(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+       struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+       struct i2c_msg *msg = i2c_dev->msg;
+       u8 *buf = msg->buf;
+       u32 temp;
+       int i, offset;
+
+       offset = i2c_dev->buf_offset;
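+       /* Drain the read data FIFO: 16 dwords (64 bytes) per "buffer full" interrupt */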
+       for (i = 0; i < 0x10; i++) {
+               temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
+               memcpy(buf + (offset + i * 4), &temp, 4);
+       }
+       i2c_dev->buf_offset += (0x10 * 4);
+
+       /* clearing read buffer full intr */
+       temp = HDMI_READ(HDMI_HISR);
+       HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
+       HDMI_READ(HDMI_HISR);
+
+       /* continue read transaction */
+       temp = HDMI_READ(HDMI_HI2CHCR);
+       HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
+       HDMI_READ(HDMI_HI2CHCR);
+
+       i2c_dev->status = I2C_READ_DONE;
+       return;
+}
+
+static void hdmi_i2c_transaction_done(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+       struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+       u32 temp;
+
+       /* clear transaction done intr */
+       temp = HDMI_READ(HDMI_HISR);
+       HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_DONE);
+       HDMI_READ(HDMI_HISR);
+
+       temp = HDMI_READ(HDMI_HI2CHCR);
+       HDMI_WRITE(HDMI_HI2CHCR, temp & ~HI2C_ENABLE_TRANSACTION);
+       HDMI_READ(HDMI_HI2CHCR);
+
+       i2c_dev->status = I2C_TRANSACTION_DONE;
+       return;
+}
+
+static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
+{
+       struct oaktrail_hdmi_dev *hdmi_dev = dev;
+       struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+       u32 stat;
+
+       stat = HDMI_READ(HDMI_HISR);
+
+       if (stat & HDMI_INTR_HPD) {
+               HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
+               HDMI_READ(HDMI_HISR);
+       }
+
+       if (stat & HDMI_INTR_I2C_FULL)
+               hdmi_i2c_read(hdmi_dev);
+
+       if (stat & HDMI_INTR_I2C_DONE)
+               hdmi_i2c_transaction_done(hdmi_dev);
+
+       complete(&i2c_dev->complete);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Select alternate function 2 of GPIO pins 52 and 53,
+ * which are used by the HDMI I2C logic.
+ */
+static void oaktrail_hdmi_i2c_gpio_fix(void)
+{
+       void *base;
+       unsigned int gpio_base = 0xff12c000;
+       int gpio_len = 0x1000;
+       u32 temp;
+
+       base = ioremap((resource_size_t)gpio_base, gpio_len);
+       if (base == NULL) {
+               DRM_ERROR("gpio ioremap fail\n");
+               return;
+       }
+
+       temp = readl(base + 0x44);
+       DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
+       writel((temp | 0x00000a00), (base +  0x44));
+       temp = readl(base + 0x44);
+       DRM_DEBUG_DRIVER("new gpio val %x\n", temp);
+
+       iounmap(base);
+}
+
+int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
+{
+       struct oaktrail_hdmi_dev *hdmi_dev;
+       struct hdmi_i2c_dev *i2c_dev;
+       int ret;
+
+       hdmi_dev = pci_get_drvdata(dev);
+
+       i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
+       if (i2c_dev == NULL) {
+               DRM_ERROR("Can't allocate interface\n");
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
+       i2c_dev->status = I2C_STAT_INIT;
+       init_completion(&i2c_dev->complete);
+       mutex_init(&i2c_dev->i2c_lock);
+       i2c_set_adapdata(&oaktrail_hdmi_i2c_adapter, hdmi_dev);
+       hdmi_dev->i2c_dev = i2c_dev;
+
+       /* Enable HDMI I2C function on gpio */
+       oaktrail_hdmi_i2c_gpio_fix();
+
+       /* request irq */
+       ret = request_irq(dev->irq, oaktrail_hdmi_i2c_handler, IRQF_SHARED,
+                         oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
+       if (ret) {
+               DRM_ERROR("Failed to request IRQ for I2C controller\n");
+               goto err;
+       }
+
+       /* Adapter registration */
+       ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
+       if (ret)
+               goto err_free_irq;
+       return 0;
+
+err_free_irq:
+       free_irq(dev->irq, hdmi_dev);
+err:
+       kfree(i2c_dev);
+exit:
+       return ret;
+}
+
+void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
+{
+       struct oaktrail_hdmi_dev *hdmi_dev;
+       struct hdmi_i2c_dev *i2c_dev;
+
+       hdmi_dev = pci_get_drvdata(dev);
+       if (i2c_del_adapter(&oaktrail_hdmi_i2c_adapter))
+               DRM_DEBUG_DRIVER("Failed to delete hdmi-i2c adapter\n");
+
+       i2c_dev = hdmi_dev->i2c_dev;
+       kfree(i2c_dev);
+       free_irq(dev->irq, hdmi_dev);
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
new file mode 100644 (file)
index 0000000..238bbe1
--- /dev/null
@@ -0,0 +1,449 @@
+/*
+ * Copyright © 2006-2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ *     Dave Airlie <airlied@linux.ie>
+ *     Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <asm/mrst.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+/*
+ * The max/min PWM frequency is in BPCR[31:17]. The smallest value that
+ * fits in this 15-bit field is 1 (not 0); it is shifted left by one bit
+ * to obtain the actual 16-bit value that the 15 bits correspond to.
+ */
+#define MRST_BLC_MAX_PWM_REG_FREQ          0xFFFF
+#define BRIGHTNESS_MAX_LEVEL 100
+
+/**
+ * oaktrail_lvds_set_power - set the panel power state
+ * @dev: drm device
+ * @psb_intel_encoder: the LVDS encoder being powered
+ * @on: true to power the panel up, false to power it down
+ */
+static void oaktrail_lvds_set_power(struct drm_device *dev,
+                               struct psb_intel_encoder *psb_intel_encoder,
+                               bool on)
+{
+       u32 pp_status;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       if (on) {
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+                         POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
+               dev_priv->is_lvds_on = true;
+               if (dev_priv->ops->lvds_bl_power)
+                       dev_priv->ops->lvds_bl_power(dev, true);
+       } else {
+               if (dev_priv->ops->lvds_bl_power)
+                       dev_priv->ops->lvds_bl_power(dev, false);
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+                         ~POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while (pp_status & PP_ON);
+               dev_priv->is_lvds_on = false;
+               pm_request_idle(&dev->pdev->dev);
+       }
+       gma_power_end(dev);
+}
+
+static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                               to_psb_intel_encoder(encoder);
+
+       if (mode == DRM_MODE_DPMS_ON)
+               oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+       else
+               oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+
+       /* XXX: We never power down the LVDS pairs. */
+}
+
+static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *connector = NULL;
+       struct drm_crtc *crtc = encoder->crtc;
+       u32 lvds_port;
+       uint64_t v = DRM_MODE_SCALE_FULLSCREEN;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       /*
+        * The LVDS pin pair will already have been turned on in the
+        * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+        * settings.
+        */
+       lvds_port = (REG_READ(LVDS) &
+                   (~LVDS_PIPEB_SELECT)) |
+                   LVDS_PORT_EN |
+                   LVDS_BORDER_EN;
+
+       /* Enable dithering if the firmware (Moorestown) or the BIOS
+          (Oaktrail) asks for it */
+       if (mode_dev->panel_wants_dither || dev_priv->lvds_dither)
+               lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
+
+       REG_WRITE(LVDS, lvds_port);
+
+       /* Find the connector we're trying to set up */
+       list_for_each_entry(connector, &mode_config->connector_list, head) {
+               if (connector->encoder && connector->encoder->crtc == crtc)
+                       break;
+       }
+
+       /* list_for_each_entry() leaves connector pointing at the list
+          head container if nothing matched */
+       if (&connector->head == &mode_config->connector_list) {
+               DRM_ERROR("Couldn't find connector when setting mode");
+               gma_power_end(dev);
+               return;
+       }
+
+       drm_connector_property_get_value(
+               connector,
+               dev->mode_config.scaling_mode_property,
+               &v);
+
+       if (v == DRM_MODE_SCALE_NO_SCALE)
+               REG_WRITE(PFIT_CONTROL, 0);
+       else if (v == DRM_MODE_SCALE_ASPECT) {
+               if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
+                   (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
+                       if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
+                           (mode->hdisplay * adjusted_mode->crtc_vdisplay))
+                               REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+                       else if ((adjusted_mode->crtc_hdisplay *
+                               mode->vdisplay) > (mode->hdisplay *
+                               adjusted_mode->crtc_vdisplay))
+                               REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+                                         PFIT_SCALING_MODE_PILLARBOX);
+                       else
+                               REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+                                         PFIT_SCALING_MODE_LETTERBOX);
+               } else
+                       REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+       } else /*(v == DRM_MODE_SCALE_FULLSCREEN)*/
+               REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+
+       gma_power_end(dev);
+}
+
+static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                               to_psb_intel_encoder(encoder);
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+       mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+                                         BACKLIGHT_DUTY_CYCLE_MASK);
+       oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+       gma_power_end(dev);
+}
+
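+/*
+ * Derive the maximum backlight level from the PWM modulation frequency
+ * field of BLC_PWM_CTL; the field holds half of the 16-bit PWM period,
+ * hence the multiplication by two (see the note at the top of this file).
+ */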
+static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 ret;
+
+       if (gma_power_begin(dev, false)) {
+               ret = ((REG_READ(BLC_PWM_CTL) &
+                         BACKLIGHT_MODULATION_FREQ_MASK) >>
+                         BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+               gma_power_end(dev);
+       } else
+               ret = ((dev_priv->saveBLC_PWM_CTL &
+                         BACKLIGHT_MODULATION_FREQ_MASK) >>
+                         BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+       return ret;
+}
+
+static void oaktrail_lvds_commit(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                               to_psb_intel_encoder(encoder);
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+       if (mode_dev->backlight_duty_cycle == 0)
+               mode_dev->backlight_duty_cycle =
+                                       oaktrail_lvds_get_max_backlight(dev);
+       oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+}
+
+static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
+       .dpms = oaktrail_lvds_dpms,
+       .mode_fixup = psb_intel_lvds_mode_fixup,
+       .prepare = oaktrail_lvds_prepare,
+       .mode_set = oaktrail_lvds_mode_set,
+       .commit = oaktrail_lvds_commit,
+};
+
+static struct drm_display_mode lvds_configuration_modes[] = {
+       /* hard coded fixed mode for TPO LTPS LPJ040K001A */
+       { DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
+                  846, 1056, 0, 480, 489, 491, 525, 0, 0) },
+       /* hard coded fixed mode for LVDS 800x480 */
+       { DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
+                  802, 1024, 0, 480, 481, 482, 525, 0, 0) },
+       /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
+       { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
+                  1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
+       /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
+       { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
+                  1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
+       /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
+       { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
+                  1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
+       /* hard coded fixed mode for LVDS 1024x768 */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+                  1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
+       /* hard coded fixed mode for LVDS 1366x768 */
+       { DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
+                  1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
+};
+
+/* Returns the panel fixed mode from configuration. */
+
+static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
+                                       struct psb_intel_mode_device *mode_dev)
+{
+       struct drm_display_mode *mode = NULL;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+
+       mode_dev->panel_fixed_mode = NULL;
+
+       /* Use the firmware provided data on Moorestown */
+       if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/
+               mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+               if (!mode)
+                       return;
+
+               mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+               mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+               mode->hsync_start = mode->hdisplay + \
+                               ((ti->hsync_offset_hi << 8) | \
+                               ti->hsync_offset_lo);
+               mode->hsync_end = mode->hsync_start + \
+                               ((ti->hsync_pulse_width_hi << 8) | \
+                               ti->hsync_pulse_width_lo);
+               mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+                                                       ti->hblank_lo);
+               mode->vsync_start = \
+                       mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
+                                               ti->vsync_offset_lo);
+               mode->vsync_end = \
+                       mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
+                                               ti->vsync_pulse_width_lo);
+               mode->vtotal = mode->vdisplay + \
+                               ((ti->vblank_hi << 8) | ti->vblank_lo);
+               mode->clock = ti->pixel_clock * 10;
+#if 0
+               printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
+               printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
+               printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
+               printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
+               printk(KERN_INFO "htotal is %d\n", mode->htotal);
+               printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
+               printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
+               printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
+               printk(KERN_INFO "clock is %d\n", mode->clock);
+#endif
+               mode_dev->panel_fixed_mode = mode;
+       }
+
+       /* Use the BIOS VBT mode if available */
+       if (mode_dev->panel_fixed_mode == NULL && mode_dev->vbt_mode)
+               mode_dev->panel_fixed_mode = drm_mode_duplicate(dev,
+                                               mode_dev->vbt_mode);
+
+       /* Then try the LVDS VBT mode */
+       if (mode_dev->panel_fixed_mode == NULL)
+               if (dev_priv->lfp_lvds_vbt_mode)
+                       mode_dev->panel_fixed_mode =
+                               drm_mode_duplicate(dev,
+                                       dev_priv->lfp_lvds_vbt_mode);
+       /* Then guess */
+       if (mode_dev->panel_fixed_mode == NULL)
+               mode_dev->panel_fixed_mode
+                       = drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
+
+       drm_mode_set_name(mode_dev->panel_fixed_mode);
+       drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
+}
+
+/**
+ * oaktrail_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ * @mode_dev: mode device to populate with the panel mode
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void oaktrail_lvds_init(struct drm_device *dev,
+                   struct psb_intel_mode_device *mode_dev)
+{
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct psb_intel_connector *psb_intel_connector;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct edid *edid;
+       int ret = 0;
+       struct i2c_adapter *i2c_adap;
+       struct drm_display_mode *scan;  /* *modes, *bios_mode; */
+
+       psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+       if (!psb_intel_encoder)
+               return;
+
+       psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+       if (!psb_intel_connector)
+               goto failed_connector;
+
+       connector = &psb_intel_connector->base;
+       encoder = &psb_intel_encoder->base;
+       dev_priv->is_lvds_on = true;
+       drm_connector_init(dev, connector,
+                          &psb_intel_lvds_connector_funcs,
+                          DRM_MODE_CONNECTOR_LVDS);
+
+       drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
+                        DRM_MODE_ENCODER_LVDS);
+
+       psb_intel_connector_attach_encoder(psb_intel_connector,
+                                          psb_intel_encoder);
+       psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+       drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
+       drm_connector_helper_add(connector,
+                                &psb_intel_lvds_connector_helper_funcs);
+       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+
+       drm_connector_attach_property(connector,
+                                       dev->mode_config.scaling_mode_property,
+                                       DRM_MODE_SCALE_FULLSCREEN);
+       drm_connector_attach_property(connector,
+                                       dev_priv->backlight_property,
+                                       BRIGHTNESS_MAX_LEVEL);
+
+       mode_dev->panel_wants_dither = false;
+       if (dev_priv->vbt_data.size != 0x00)
+               mode_dev->panel_wants_dither = (dev_priv->gct_data.
+                       Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
+       if (dev_priv->lvds_dither)
+               mode_dev->panel_wants_dither = 1;
+
+       /*
+        * LVDS discovery:
+        * 1) check for EDID on DDC
+        * 2) check for VBT data
+        * 3) check to see if LVDS is already on
+        *    if none of the above, no panel
+        * 4) make sure lid is open
+        *    if closed, act like it's not there for now
+        */
+
+       i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
+       if (i2c_adap == NULL)
+               dev_err(dev->dev, "No ddc adapter available!\n");
+       /*
+        * Attempt to get the fixed panel mode from DDC.  Assume that the
+        * preferred mode is the right one.
+        */
+       if (i2c_adap) {
+               edid = drm_get_edid(connector, i2c_adap);
+               if (edid) {
+                       drm_mode_connector_update_edid_property(connector,
+                                                                       edid);
+                       ret = drm_add_edid_modes(connector, edid);
+                       kfree(edid);
+               }
+
+               list_for_each_entry(scan, &connector->probed_modes, head) {
+                       if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+                               mode_dev->panel_fixed_mode =
+                                   drm_mode_duplicate(dev, scan);
+                               goto out;       /* FIXME: check for quirks */
+                       }
+               }
+       }
+       /*
+        * If we didn't get EDID, try getting the panel timing
+        * from the configuration data
+        */
+       oaktrail_lvds_get_configuration_mode(dev, mode_dev);
+
+       if (mode_dev->panel_fixed_mode) {
+               mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+               goto out;       /* FIXME: check for quirks */
+       }
+
+       /* If we still don't have a mode after all that, give up. */
+       if (!mode_dev->panel_fixed_mode) {
+               dev_err(dev->dev, "Found no modes on the LVDS, ignoring it\n");
+               goto failed_find;
+       }
+
+out:
+       drm_sysfs_connector_add(connector);
+       return;
+
+failed_find:
+       dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
+       if (psb_intel_encoder->ddc_bus)
+               psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+
+/* failed_ddc: */
+
+       drm_encoder_cleanup(encoder);
+       drm_connector_cleanup(connector);
+       kfree(psb_intel_connector);
+failed_connector:
+       kfree(psb_intel_encoder);
+}
+
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
new file mode 100644 (file)
index 0000000..9402569
--- /dev/null
@@ -0,0 +1,316 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ * Massively reworked
+ *    Alan Cox <alan@linux.intel.com>
+ */
+
+#include "power.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+
+static struct mutex power_mutex;       /* Serialize power ops */
+static spinlock_t power_ctrl_lock;     /* Serialize power claim */
+
+/**
+ *     gma_power_init          -       initialise power manager
+ *     @dev: our device
+ *
+ *     Set up for power management tracking of our hardware.
+ */
+void gma_power_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       /* FIXME: Move APM/OSPM base into relevant device code */
+       dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
+       dev_priv->ospm_base &= 0xffff;
+
+       dev_priv->display_power = true; /* We start active */
+       dev_priv->display_count = 0;    /* Currently no users */
+       dev_priv->suspended = false;    /* And not suspended */
+       spin_lock_init(&power_ctrl_lock);
+       mutex_init(&power_mutex);
+
+       dev_priv->ops->init_pm(dev);
+}
+
+/**
+ *     gma_power_uninit        -       end power manager
+ *     @dev: device to end for
+ *
+ *     Undo the effects of gma_power_init
+ */
+void gma_power_uninit(struct drm_device *dev)
+{
+       pm_runtime_disable(&dev->pdev->dev);
+       pm_runtime_set_suspended(&dev->pdev->dev);
+}
+
+/**
+ *     gma_suspend_display     -       suspend the display logic
+ *     @dev: our DRM device
+ *
+ *     Suspend the display logic of the graphics interface
+ */
+static void gma_suspend_display(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->suspended)
+               return;
+       dev_priv->ops->save_regs(dev);
+       dev_priv->ops->power_down(dev);
+       dev_priv->display_power = false;
+}
+
+/**
+ *     gma_resume_display      -       resume display side logic
+ *     @pdev: our PCI device
+ *
+ *     Resume the display hardware restoring state and enabling
+ *     as necessary.
+ */
+static void gma_resume_display(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (!dev_priv->suspended)
+               return;
+
+       /* turn on the display power island */
+       dev_priv->ops->power_up(dev);
+       dev_priv->suspended = false;
+       dev_priv->display_power = true;
+
+       PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+       pci_write_config_word(pdev, PSB_GMCH_CTRL,
+                       dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+       dev_priv->ops->restore_regs(dev);
+}
+
+/**
+ *     gma_suspend_pci         -       suspend PCI side
+ *     @pdev: PCI device
+ *
+ *     Perform the suspend processing on our PCI device state
+ */
+static void gma_suspend_pci(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int bsm, vbt;
+
+       if (dev_priv->suspended)
+               return;
+
+       pci_save_state(pdev);
+       pci_read_config_dword(pdev, 0x5C, &bsm);
+       dev_priv->saveBSM = bsm;
+       pci_read_config_dword(pdev, 0xFC, &vbt);
+       dev_priv->saveVBT = vbt;
+       pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
+       pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
+
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
+
+       dev_priv->suspended = true;
+}
+
+/**
+ *     gma_resume_pci          -       resume helper
+ *     @pdev: our PCI device
+ *
+ *     Perform the resume processing on our PCI device state - rewrite
+ *     register state and re-enable the PCI device
+ */
+static bool gma_resume_pci(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret;
+
+       if (!dev_priv->suspended)
+               return true;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM);
+       pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT);
+       /* restoring MSI address and data in PCIx space */
+       pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
+       pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
+       ret = pci_enable_device(pdev);
+
+       if (ret != 0)
+               dev_err(&pdev->dev, "pci_enable failed: %d\n", ret);
+       else
+               dev_priv->suspended = false;
+       return !dev_priv->suspended;
+}
+
+/**
+ *     gma_power_suspend               -       bus callback for suspend
+ *     @_dev: generic device (member of our PCI device)
+ *
+ *     Called back by the PCI layer during a suspend of the system. We
+ *     perform the necessary shut down steps and save enough state that
+ *     we can undo this when resume is called.
+ */
+int gma_power_suspend(struct device *_dev)
+{
+       struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&power_mutex);
+       if (!dev_priv->suspended) {
+               if (dev_priv->display_count) {
+                       mutex_unlock(&power_mutex);
+                       return -EBUSY;
+               }
+               psb_irq_uninstall(dev);
+               gma_suspend_display(dev);
+               gma_suspend_pci(pdev);
+       }
+       mutex_unlock(&power_mutex);
+       return 0;
+}
+
+/**
+ *     gma_power_resume                -       resume power
+ *     @_dev: generic device (member of our PCI device)
+ *
+ *     Resume the PCI side of the graphics and then the displays
+ */
+int gma_power_resume(struct device *_dev)
+{
+       struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       mutex_lock(&power_mutex);
+       gma_resume_pci(pdev);
+       gma_resume_display(pdev);
+       psb_irq_preinstall(dev);
+       psb_irq_postinstall(dev);
+       mutex_unlock(&power_mutex);
+       return 0;
+}
+
+/**
+ *     gma_power_is_on         -       return true if power is on
+ *     @dev: our DRM device
+ *
+ *     Returns true if the display island power is on at this moment
+ */
+bool gma_power_is_on(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       return dev_priv->display_power;
+}
+
+/**
+ *     gma_power_begin         -       begin requiring power
+ *     @dev: our DRM device
+ *     @force_on: true to force power on
+ *
+ *     Begin an action that requires the display power island to be enabled.
+ *     We refcount the islands.
+ */
+bool gma_power_begin(struct drm_device *dev, bool force_on)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&power_ctrl_lock, flags);
+       /* Power already on ? */
+       if (dev_priv->display_power) {
+               dev_priv->display_count++;
+               pm_runtime_get(&dev->pdev->dev);
+               spin_unlock_irqrestore(&power_ctrl_lock, flags);
+               return true;
+       }
+       if (!force_on)
+               goto out_false;
+
+       /* Ok power up needed */
+       ret = gma_resume_pci(dev->pdev);
+       if (ret == 0) {
+               psb_irq_preinstall(dev);
+               psb_irq_postinstall(dev);
+               pm_runtime_get(&dev->pdev->dev);
+               dev_priv->display_count++;
+               spin_unlock_irqrestore(&power_ctrl_lock, flags);
+               return true;
+       }
+out_false:
+       spin_unlock_irqrestore(&power_ctrl_lock, flags);
+       return false;
+}
+
+/**
+ *     gma_power_end           -       end use of power
+ *     @dev: Our DRM device
+ *
+ *     Indicate that a period started by gma_power_begin(), during which
+ *     the display island power was needed, has completed.
+ */
+void gma_power_end(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long flags;
+       spin_lock_irqsave(&power_ctrl_lock, flags);
+       dev_priv->display_count--;
+       WARN_ON(dev_priv->display_count < 0);
+       spin_unlock_irqrestore(&power_ctrl_lock, flags);
+       pm_runtime_put(&dev->pdev->dev);
+}
+
+int psb_runtime_suspend(struct device *dev)
+{
+       return gma_power_suspend(dev);
+}
+
+int psb_runtime_resume(struct device *dev)
+{
+       return gma_power_resume(dev);
+}
+
+int psb_runtime_idle(struct device *dev)
+{
+       struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
+       struct drm_psb_private *dev_priv = drmdev->dev_private;
+       if (dev_priv->display_count)
+               return 0;
+       else
+               return 1;
+}
diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
new file mode 100644 (file)
index 0000000..1969d2e
--- /dev/null
@@ -0,0 +1,67 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ * Massively reworked
+ *    Alan Cox <alan@linux.intel.com>
+ */
+#ifndef _PSB_POWERMGMT_H_
+#define _PSB_POWERMGMT_H_
+
+#include <linux/pci.h>
+#include <drm/drmP.h>
+
+void gma_power_init(struct drm_device *dev);
+void gma_power_uninit(struct drm_device *dev);
+
+/*
+ * The kernel bus power management will call these functions
+ */
+int gma_power_suspend(struct device *dev);
+int gma_power_resume(struct device *dev);
+
+/*
+ * These are the functions the driver should use to wrap all hw access
+ * (i.e. register reads and writes)
+ */
+bool gma_power_begin(struct drm_device *dev, bool force);
+void gma_power_end(struct drm_device *dev);
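+
+/*
+ * Typical usage (illustrative only; SOME_REG is a placeholder, not a
+ * real register name):
+ *
+ *     if (gma_power_begin(dev, true)) {
+ *             PSB_WVDC32(value, SOME_REG);
+ *             gma_power_end(dev);
+ *     }
+ *
+ * Pass false as the second argument when the caller would rather skip
+ * the access than power the island up just for it.
+ */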
+
+/*
+ * Use this function to do an instantaneous check of whether the hw is on.
+ * Only use this in cases where you know the mutex is already held, such
+ * as in irq install/uninstall, and you need to prevent a deadlock.
+ * Otherwise use gma_power_begin().
+ */
+bool gma_power_is_on(struct drm_device *dev);
+
+/*
+ * GFX-Runtime PM callbacks
+ */
+int psb_runtime_suspend(struct device *dev);
+int psb_runtime_resume(struct device *dev);
+int psb_runtime_idle(struct device *dev);
+
+#endif /*_PSB_POWERMGMT_H_*/
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
new file mode 100644 (file)
index 0000000..e5f5906
--- /dev/null
@@ -0,0 +1,328 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+
+
+static int psb_output_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       psb_intel_lvds_init(dev, &dev_priv->mode_dev);
+       psb_intel_sdvo_init(dev, SDVOB);
+       return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ *     Poulsbo Backlight Interfaces
+ */
+
+#define BLC_PWM_PRECISION_FACTOR 100   /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+
+#define PSB_BLC_PWM_PRECISION_FACTOR    10
+#define PSB_BLC_MAX_PWM_REG_FREQ        0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ        0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT    (16)
+
+static int psb_brightness;
+static struct backlight_device *psb_backlight_device;
+
+static int psb_get_brightness(struct backlight_device *bd)
+{
+       /* return locally cached var instead of HW read (due to DPST etc.) */
+       /* FIXME: ideally return actual value in case firmware fiddled with
+          it */
+       return psb_brightness;
+}
+
+
+static int psb_backlight_setup(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long core_clock;
+       /* u32 bl_max_freq; */
+       /* unsigned long value; */
+       u16 bl_max_freq;
+       uint32_t value;
+       uint32_t blc_pwm_precision_factor;
+
+       /* get bl_max_freq and pol from dev_priv*/
+       if (!dev_priv->lvds_bl) {
+               dev_err(dev->dev, "No valid LVDS backlight info\n");
+               return -ENOENT;
+       }
+       bl_max_freq = dev_priv->lvds_bl->freq;
+       blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+
+       core_clock = dev_priv->core_freq;
+
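+       /*
+        * Convert the requested backlight PWM frequency into the register
+        * value: core clock cycles per PWM period, divided down by
+        * BLC_PWM_FREQ_CALC_CONSTANT and computed via the precision factor
+        * to keep integer accuracy.  Purely as an illustration, a 200MHz
+        * core clock with a (hypothetical) 3200Hz backlight frequency gives
+        * (200 * 1000000 / 32) * 10 / 3200 / 10 = 1953, inside the
+        * 0x2..0xFFFE range checked below.
+        */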
+       value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+       value *= blc_pwm_precision_factor;
+       value /= bl_max_freq;
+       value /= blc_pwm_precision_factor;
+
+       if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+           value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+               return -ERANGE;
+       else {
+               value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+               REG_WRITE(BLC_PWM_CTL,
+                       (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
+       }
+       return 0;
+}
+
+static int psb_set_brightness(struct backlight_device *bd)
+{
+       struct drm_device *dev = bl_get_data(psb_backlight_device);
+       int level = bd->props.brightness;
+
+       /* Percentage 1-100% being valid */
+       if (level < 1)
+               level = 1;
+
+       psb_intel_lvds_set_brightness(dev, level);
+       psb_brightness = level;
+       return 0;
+}
+
+static const struct backlight_ops psb_ops = {
+       .get_brightness = psb_get_brightness,
+       .update_status  = psb_set_brightness,
+};
+
+static int psb_backlight_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int ret;
+       struct backlight_properties props;
+
+       memset(&props, 0, sizeof(struct backlight_properties));
+       props.max_brightness = 100;
+       props.type = BACKLIGHT_PLATFORM;
+
+       psb_backlight_device = backlight_device_register("psb-bl",
+                                       NULL, (void *)dev, &psb_ops, &props);
+       if (IS_ERR(psb_backlight_device))
+               return PTR_ERR(psb_backlight_device);
+
+       ret = psb_backlight_setup(dev);
+       if (ret < 0) {
+               backlight_device_unregister(psb_backlight_device);
+               psb_backlight_device = NULL;
+               return ret;
+       }
+       psb_backlight_device->props.brightness = 100;
+       psb_backlight_device->props.max_brightness = 100;
+       backlight_update_status(psb_backlight_device);
+       dev_priv->backlight_device = psb_backlight_device;
+       return 0;
+}
+
+#endif
+
+/*
+ *     Provide the Poulsbo specific chip logic and low level methods
+ *     for power management
+ */
+
+static void psb_init_pm(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
+       gating &= ~3;   /* Disable 2D clock gating */
+       gating |= 1;
+       PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
+       PSB_RSGX32(PSB_CR_CLKGATECTL);
+}
+
+/**
+ *     psb_save_display_registers      -       save registers lost on suspend
+ *     @dev: our DRM device
+ *
+ *     Save the state we need in order to be able to restore the interface
+ *     upon resume from suspend
+ */
+static int psb_save_display_registers(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+       struct drm_connector *connector;
+
+       /* Display arbitration control + watermarks */
+       dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
+       dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+       dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+       dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+       dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+       dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+       dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+       dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+       /* Save crtc and output state */
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (drm_helper_crtc_in_use(crtc))
+                       crtc->funcs->save(crtc);
+       }
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               connector->funcs->save(connector);
+
+       mutex_unlock(&dev->mode_config.mutex);
+       return 0;
+}
+
+/**
+ *     psb_restore_display_registers   -       restore lost register state
+ *     @dev: our DRM device
+ *
+ *     Restore register state that was lost during suspend and resume.
+ */
+static int psb_restore_display_registers(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+       struct drm_connector *connector;
+
+       /* Display arbitration + watermarks */
+       PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
+       PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
+       PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
+       PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
+       PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
+       PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
+       PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
+       PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+
+       /*make sure VGA plane is off. it initializes to on after reset!*/
+       PSB_WVDC32(0x80000000, VGACNTRL);
+
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               if (drm_helper_crtc_in_use(crtc))
+                       crtc->funcs->restore(crtc);
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               connector->funcs->restore(connector);
+
+       mutex_unlock(&dev->mode_config.mutex);
+       return 0;
+}
+
+static int psb_power_down(struct drm_device *dev)
+{
+       return 0;
+}
+
+static int psb_power_up(struct drm_device *dev)
+{
+       return 0;
+}
+
+static void psb_get_core_freq(struct drm_device *dev)
+{
+       uint32_t clock;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
+       /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
+
+       pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+       pci_read_config_dword(pci_root, 0xD4, &clock);
+       pci_dev_put(pci_root);
+
+       switch (clock & 0x07) {
+       case 0:
+               dev_priv->core_freq = 100;
+               break;
+       case 1:
+               dev_priv->core_freq = 133;
+               break;
+       case 2:
+               dev_priv->core_freq = 150;
+               break;
+       case 3:
+               dev_priv->core_freq = 178;
+               break;
+       case 4:
+               dev_priv->core_freq = 200;
+               break;
+       case 5:
+       case 6:
+       case 7:
+               dev_priv->core_freq = 266;
+               break;
+       default:
+               dev_priv->core_freq = 0;
+       }
+}
+
+static int psb_chip_setup(struct drm_device *dev)
+{
+       psb_get_core_freq(dev);
+       gma_intel_setup_gmbus(dev);
+       gma_intel_opregion_init(dev);
+       psb_intel_init_bios(dev);
+       return 0;
+}
+
+static void psb_chip_teardown(struct drm_device *dev)
+{
+       gma_intel_teardown_gmbus(dev);
+}
+
+const struct psb_ops psb_chip_ops = {
+       .name = "Poulsbo",
+       .accel_2d = 1,
+       .pipes = 2,
+       .crtcs = 2,
+       .sgx_offset = PSB_SGX_OFFSET,
+       .chip_setup = psb_chip_setup,
+       .chip_teardown = psb_chip_teardown,
+
+       .crtc_helper = &psb_intel_helper_funcs,
+       .crtc_funcs = &psb_intel_crtc_funcs,
+
+       .output_init = psb_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       .backlight_init = psb_backlight_init,
+#endif
+
+       .init_pm = psb_init_pm,
+       .save_regs = psb_save_display_registers,
+       .restore_regs = psb_restore_display_registers,
+       .power_down = psb_power_down,
+       .power_up = psb_power_up,
+};
+
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
new file mode 100644 (file)
index 0000000..f14768f
--- /dev/null
@@ -0,0 +1,703 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "framebuffer.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "mid_bios.h"
+#include <drm/drm_pciids.h>
+#include "power.h"
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
+#include <acpi/video.h>
+#include <linux/module.h>
+
+static int drm_psb_trap_pagefaults;
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
+
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+       { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+       { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+#if defined(CONFIG_DRM_GMA600)
+       { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+       /* Atom E620 */
+       { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+#endif
+#if defined(CONFIG_DRM_GMA3600)
+       { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+       { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+#endif
+       { 0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+/*
+ * Standard IOCTLs.
+ */
+
+#define DRM_IOCTL_PSB_ADB      \
+               DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_MODE_OPERATION   \
+               DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
+                        struct drm_psb_mode_operation_arg)
+#define DRM_IOCTL_PSB_STOLEN_MEMORY    \
+               DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
+                        struct drm_psb_stolen_memory_arg)
+#define DRM_IOCTL_PSB_GAMMA    \
+               DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
+                        struct drm_psb_dpst_lut_arg)
+#define DRM_IOCTL_PSB_DPST_BL  \
+               DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
+                        uint32_t)
+#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID    \
+               DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
+                        struct drm_psb_get_pipe_from_crtc_id_arg)
+#define DRM_IOCTL_PSB_GEM_CREATE       \
+               DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
+                        struct drm_psb_gem_create)
+#define DRM_IOCTL_PSB_GEM_MMAP \
+               DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
+                        struct drm_psb_gem_mmap)
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv);
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+                                  struct drm_file *file_priv);
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv);
+
+#define PSB_IOCTL_DEF(ioctl, func, flags) \
+       [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+
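+/*
+ * psb_ioctls[] is indexed by the device-private ioctl number, i.e.
+ * DRM_IOCTL_NR(cmd) - DRM_COMMAND_BASE, which is what PSB_IOCTL_DEF
+ * computes: DRM_IOCTL_PSB_ADB, for example, lands in slot DRM_GMA_ADB.
+ */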
+static struct drm_ioctl_desc psb_ioctls[] = {
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
+                     DRM_AUTH),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
+                     DRM_AUTH),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID,
+                                       psb_intel_get_pipe_from_crtc_id, 0),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_CREATE, psb_gem_create_ioctl,
+                                               DRM_UNLOCKED | DRM_AUTH),
+       PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_MMAP, psb_gem_mmap_ioctl,
+                                               DRM_UNLOCKED | DRM_AUTH),
+};
+
+static void psb_lastclose(struct drm_device *dev)
+{
+       return;
+}
+
+static void psb_do_takedown(struct drm_device *dev)
+{
+}
+
+static int psb_do_init(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_gtt *pg = &dev_priv->gtt;
+
+       uint32_t stolen_gtt;
+
+       int ret = -ENOMEM;
+
+       if (pg->mmu_gatt_start & 0x0FFFFFFF) {
+               dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+
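+       /*
+        * Work out how many GTT pages are needed to hold the page table
+        * entries for the stolen memory: 4 bytes of PTE per stolen page,
+        * rounded up to whole pages and clamped to the GTT size.  Each
+        * such page of PTEs maps 1024 pages, hence the * 1024 in the
+        * offset calculation below.
+        */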
+       stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
+       stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       stolen_gtt =
+           (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
+
+       dev_priv->gatt_free_offset = pg->mmu_gatt_start +
+           (stolen_gtt << PAGE_SHIFT) * 1024;
+
+       if (1 || drm_debug) {
+               uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
+               uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
+               DRM_INFO("SGX core id = 0x%08x\n", core_id);
+               DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
+                        (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
+                        _PSB_CC_REVISION_MAJOR_SHIFT,
+                        (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
+                        _PSB_CC_REVISION_MINOR_SHIFT);
+               DRM_INFO
+                   ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
+                    (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
+                    _PSB_CC_REVISION_MAINTENANCE_SHIFT,
+                    (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
+                    _PSB_CC_REVISION_DESIGNER_SHIFT);
+       }
+
+
+       spin_lock_init(&dev_priv->irqmask_lock);
+       spin_lock_init(&dev_priv->lock_2d);
+
+       PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
+       PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
+       PSB_RSGX32(PSB_CR_BIF_BANK1);
+       PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
+                                                       PSB_CR_BIF_CTRL);
+       psb_spank(dev_priv);
+
+       /* mmu_gatt ?? */
+       PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+       return 0;
+out_err:
+       psb_do_takedown(dev);
+       return ret;
+}
+
+static int psb_driver_unload(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       /* Kill vblank etc here */
+
+       gma_backlight_exit(dev);
+
+       psb_modeset_cleanup(dev);
+
+       if (dev_priv) {
+               psb_lid_timer_takedown(dev_priv);
+               gma_intel_opregion_exit(dev);
+
+               if (dev_priv->ops->chip_teardown)
+                       dev_priv->ops->chip_teardown(dev);
+               psb_do_takedown(dev);
+
+
+               if (dev_priv->pf_pd) {
+                       psb_mmu_free_pagedir(dev_priv->pf_pd);
+                       dev_priv->pf_pd = NULL;
+               }
+               if (dev_priv->mmu) {
+                       struct psb_gtt *pg = &dev_priv->gtt;
+
+                       down_read(&pg->sem);
+                       psb_mmu_remove_pfn_sequence(
+                               psb_mmu_get_default_pd
+                               (dev_priv->mmu),
+                               pg->mmu_gatt_start,
+                               dev_priv->vram_stolen_size >> PAGE_SHIFT);
+                       up_read(&pg->sem);
+                       psb_mmu_driver_takedown(dev_priv->mmu);
+                       dev_priv->mmu = NULL;
+               }
+               psb_gtt_takedown(dev);
+               if (dev_priv->scratch_page) {
+                       __free_page(dev_priv->scratch_page);
+                       dev_priv->scratch_page = NULL;
+               }
+               if (dev_priv->vdc_reg) {
+                       iounmap(dev_priv->vdc_reg);
+                       dev_priv->vdc_reg = NULL;
+               }
+               if (dev_priv->sgx_reg) {
+                       iounmap(dev_priv->sgx_reg);
+                       dev_priv->sgx_reg = NULL;
+               }
+
+               kfree(dev_priv);
+               dev->dev_private = NULL;
+
+               /*destroy VBT data*/
+               psb_intel_destroy_bios(dev);
+       }
+
+       gma_power_uninit(dev);
+
+       return 0;
+}
+
+
+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+       struct drm_psb_private *dev_priv;
+       unsigned long resource_start;
+       struct psb_gtt *pg;
+       unsigned long irqflags;
+       int ret = -ENOMEM;
+       uint32_t tt_pages;
+       struct drm_connector *connector;
+       struct psb_intel_encoder *psb_intel_encoder;
+
+       dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+       if (dev_priv == NULL)
+               return -ENOMEM;
+
+       dev_priv->ops = (struct psb_ops *)chipset;
+       dev_priv->dev = dev;
+       dev->dev_private = (void *) dev_priv;
+
+       if (!IS_PSB(dev)) {
+               if (pci_enable_msi(dev->pdev))
+                       dev_warn(dev->dev, "Enabling MSI failed!\n");
+       }
+
+       dev_priv->num_pipe = dev_priv->ops->pipes;
+
+       resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+
+       dev_priv->vdc_reg =
+           ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
+       if (!dev_priv->vdc_reg)
+               goto out_err;
+
+       dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
+                                                       PSB_SGX_SIZE);
+       if (!dev_priv->sgx_reg)
+               goto out_err;
+
+       ret = dev_priv->ops->chip_setup(dev);
+       if (ret)
+               goto out_err;
+
+       /* Init OSPM support */
+       gma_power_init(dev);
+
+       ret = -ENOMEM;
+
+       dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
+       if (!dev_priv->scratch_page)
+               goto out_err;
+
+       set_pages_uc(dev_priv->scratch_page, 1);
+
+       ret = psb_gtt_init(dev, 0);
+       if (ret)
+               goto out_err;
+
+       dev_priv->mmu = psb_mmu_driver_init((void *)0,
+                                       drm_psb_trap_pagefaults, 0,
+                                       dev_priv);
+       if (!dev_priv->mmu)
+               goto out_err;
+
+       pg = &dev_priv->gtt;
+
+       tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+               (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+
+
+       dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
+       if (!dev_priv->pf_pd)
+               goto out_err;
+
+       psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+       psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+
+       ret = psb_do_init(dev);
+       if (ret)
+               return ret;
+
+       PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
+       PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
+
+/*     igd_opregion_init(&dev_priv->opregion_dev); */
+       acpi_video_register();
+       if (dev_priv->lid_state)
+               psb_lid_timer_init(dev_priv);
+
+       ret = drm_vblank_init(dev, dev_priv->num_pipe);
+       if (ret)
+               goto out_err;
+
+       /*
+        * Install interrupt handlers prior to powering off SGX or else we will
+        * crash.
+        */
+       dev_priv->vdc_irq_mask = 0;
+       dev_priv->pipestat[0] = 0;
+       dev_priv->pipestat[1] = 0;
+       dev_priv->pipestat[2] = 0;
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+       PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+       PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+       PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+       if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_irq_install(dev);
+
+       dev->vblank_disable_allowed = 1;
+
+       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+       dev->driver->get_vblank_counter = psb_get_vblank_counter;
+
+       psb_modeset_init(dev);
+       psb_fbdev_init(dev);
+       drm_kms_helper_poll_init(dev);
+
+       /* Only add backlight support if we have LVDS output */
+       list_for_each_entry(connector, &dev->mode_config.connector_list,
+                           head) {
+               psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+               switch (psb_intel_encoder->type) {
+               case INTEL_OUTPUT_LVDS:
+               case INTEL_OUTPUT_MIPI:
+                       ret = gma_backlight_init(dev);
+                       break;
+               }
+       }
+
+       if (ret)
+               return ret;
+#if 0
+       /*enable runtime pm at last*/
+       pm_runtime_enable(&dev->pdev->dev);
+       pm_runtime_set_active(&dev->pdev->dev);
+#endif
+       /*Intel drm driver load is done, continue doing pvr load*/
+       return 0;
+out_err:
+       psb_driver_unload(dev);
+       return ret;
+}
+
+int psb_driver_device_is_agp(struct drm_device *dev)
+{
+       return 0;
+}
+
+static inline void get_brightness(struct backlight_device *bd)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       if (bd) {
+               bd->props.brightness = bd->ops->get_brightness(bd);
+               backlight_update_status(bd);
+       }
+#endif
+}
+
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_psb_private *dev_priv = psb_priv(dev);
+       uint32_t *arg = data;
+
+       dev_priv->blc_adj2 = *arg;
+       get_brightness(dev_priv->backlight_device);
+       return 0;
+}
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_psb_private *dev_priv = psb_priv(dev);
+       uint32_t *arg = data;
+
+       dev_priv->blc_adj1 = *arg;
+       get_brightness(dev_priv->backlight_device);
+       return 0;
+}
+
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv)
+{
+       struct drm_psb_dpst_lut_arg *lut_arg = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       struct drm_connector *connector;
+       struct psb_intel_crtc *psb_intel_crtc;
+       int i = 0;
+       int32_t obj_id;
+
+       obj_id = lut_arg->output_id;
+       obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               dev_dbg(dev->dev, "Invalid Connector object.\n");
+               return -EINVAL;
+       }
+
+       connector = obj_to_connector(obj);
+       crtc = connector->encoder->crtc;
+       psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+       for (i = 0; i < 256; i++)
+               psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
+
+       psb_intel_crtc_load_lut(crtc);
+
+       return 0;
+}
+
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       uint32_t obj_id;
+       uint16_t op;
+       struct drm_mode_modeinfo *umode;
+       struct drm_display_mode *mode = NULL;
+       struct drm_psb_mode_operation_arg *arg;
+       struct drm_mode_object *obj;
+       struct drm_connector *connector;
+       struct drm_connector_helper_funcs *connector_funcs;
+       int ret = 0;
+       int resp = MODE_OK;
+
+       arg = (struct drm_psb_mode_operation_arg *)data;
+       obj_id = arg->obj_id;
+       op = arg->operation;
+
+       switch (op) {
+       case PSB_MODE_OPERATION_MODE_VALID:
+               umode = &arg->mode;
+
+               mutex_lock(&dev->mode_config.mutex);
+
+               obj = drm_mode_object_find(dev, obj_id,
+                                       DRM_MODE_OBJECT_CONNECTOR);
+               if (!obj) {
+                       ret = -EINVAL;
+                       goto mode_op_out;
+               }
+
+               connector = obj_to_connector(obj);
+
+               mode = drm_mode_create(dev);
+               if (!mode) {
+                       ret = -ENOMEM;
+                       goto mode_op_out;
+               }
+
+               /* drm_crtc_convert_umode(mode, umode); */
+               {
+                       mode->clock = umode->clock;
+                       mode->hdisplay = umode->hdisplay;
+                       mode->hsync_start = umode->hsync_start;
+                       mode->hsync_end = umode->hsync_end;
+                       mode->htotal = umode->htotal;
+                       mode->hskew = umode->hskew;
+                       mode->vdisplay = umode->vdisplay;
+                       mode->vsync_start = umode->vsync_start;
+                       mode->vsync_end = umode->vsync_end;
+                       mode->vtotal = umode->vtotal;
+                       mode->vscan = umode->vscan;
+                       mode->vrefresh = umode->vrefresh;
+                       mode->flags = umode->flags;
+                       mode->type = umode->type;
+                       strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
+                       mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+               }
+
+               connector_funcs = (struct drm_connector_helper_funcs *)
+                                  connector->helper_private;
+
+               if (connector_funcs->mode_valid) {
+                       resp = connector_funcs->mode_valid(connector, mode);
+                       arg->data = resp;
+               }
+
+               /*do some clean up work*/
+               if (mode)
+                       drm_mode_destroy(dev, mode);
+mode_op_out:
+               mutex_unlock(&dev->mode_config.mutex);
+               return ret;
+
+       default:
+               dev_dbg(dev->dev, "Unsupported psb mode operation\n");
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+                                  struct drm_file *file_priv)
+{
+       struct drm_psb_private *dev_priv = psb_priv(dev);
+       struct drm_psb_stolen_memory_arg *arg = data;
+
+       arg->base = dev_priv->stolen_base;
+       arg->size = dev_priv->vram_stolen_size;
+
+       return 0;
+}
+
+static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
+{
+       return 0;
+}
+
+static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
+                              unsigned long arg)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       static unsigned int runtime_allowed;
+
+       if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
+               runtime_allowed++;
+               pm_runtime_allow(&dev->pdev->dev);
+               dev_priv->rpm_enabled = 1;
+       }
+       return drm_ioctl(filp, cmd, arg);
+       /* FIXME: do we need to wrap the other side of this */
+}
+
+
+/* When a client dies:
+ *    - Check for and clean up flipped page state
+ */
+void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static void psb_remove(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       drm_put_dev(dev);
+}
+
+static const struct dev_pm_ops psb_pm_ops = {
+       .resume = gma_power_resume,
+       .suspend = gma_power_suspend,
+       .runtime_suspend = psb_runtime_suspend,
+       .runtime_resume = psb_runtime_resume,
+       .runtime_idle = psb_runtime_idle,
+};
+
+static struct vm_operations_struct psb_gem_vm_ops = {
+       .fault = psb_gem_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
+static const struct file_operations psb_gem_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = psb_unlocked_ioctl,
+       .mmap = drm_gem_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+};
+
+static struct drm_driver driver = {
+       .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+                          DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM,
+       .load = psb_driver_load,
+       .unload = psb_driver_unload,
+
+       .ioctls = psb_ioctls,
+       .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
+       .device_is_agp = psb_driver_device_is_agp,
+       .irq_preinstall = psb_irq_preinstall,
+       .irq_postinstall = psb_irq_postinstall,
+       .irq_uninstall = psb_irq_uninstall,
+       .irq_handler = psb_irq_handler,
+       .enable_vblank = psb_enable_vblank,
+       .disable_vblank = psb_disable_vblank,
+       .get_vblank_counter = psb_get_vblank_counter,
+       .lastclose = psb_lastclose,
+       .open = psb_driver_open,
+       .preclose = psb_driver_preclose,
+       .postclose = psb_driver_close,
+       .reclaim_buffers = drm_core_reclaim_buffers,
+
+       .gem_init_object = psb_gem_init_object,
+       .gem_free_object = psb_gem_free_object,
+       .gem_vm_ops = &psb_gem_vm_ops,
+       .dumb_create = psb_gem_dumb_create,
+       .dumb_map_offset = psb_gem_dumb_map_gtt,
+       .dumb_destroy = psb_gem_dumb_destroy,
+       .fops = &psb_gem_fops,
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = PSB_DRM_DRIVER_DATE,
+       .major = PSB_DRM_DRIVER_MAJOR,
+       .minor = PSB_DRM_DRIVER_MINOR,
+       .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
+};
+
+static struct pci_driver psb_pci_driver = {
+       .name = DRIVER_NAME,
+       .id_table = pciidlist,
+       .probe = psb_probe,
+       .remove = psb_remove,
+       .driver.pm = &psb_pm_ops,
+};
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static int __init psb_init(void)
+{
+       return drm_pci_init(&driver, &psb_pci_driver);
+}
+
+static void __exit psb_exit(void)
+{
+       drm_pci_exit(&driver, &psb_pci_driver);
+}
+
+late_initcall(psb_init);
+module_exit(psb_exit);
+
+MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
new file mode 100644 (file)
index 0000000..eb1568a
--- /dev/null
@@ -0,0 +1,956 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRV_H_
+#define _PSB_DRV_H_
+
+#include <linux/kref.h>
+
+#include <drm/drmP.h>
+#include "drm_global.h"
+#include "gem_glue.h"
+#include "gma_drm.h"
+#include "psb_reg.h"
+#include "psb_intel_drv.h"
+#include "gtt.h"
+#include "power.h"
+#include "oaktrail.h"
+
+/* Append new drm mode definition here, align with libdrm definition */
+#define DRM_MODE_SCALE_NO_SCALE        2
+
+enum {
+       CHIP_PSB_8108 = 0,              /* Poulsbo */
+       CHIP_PSB_8109 = 1,              /* Poulsbo */
+       CHIP_MRST_4100 = 2,             /* Moorestown/Oaktrail */
+       CHIP_MFLD_0130 = 3,             /* Medfield */
+};
+
+#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
+#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
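+
+/*
+ * For illustration (not part of the original file): each mask folds a run
+ * of consecutive PCI device IDs into one test.  0x8108 & 0xfffe and
+ * 0x8109 & 0xfffe both give 0x8108, so IS_PSB() covers the two Poulsbo IDs
+ * in the enum above; 0xfffc similarly spans 0x4100-0x4103 (Moorestown) and
+ * 0xfff8 spans 0x0130-0x0137 (Medfield).
+ */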
+
+/*
+ * Driver definitions
+ */
+
+#define DRIVER_NAME "gma500"
+#define DRIVER_DESC "DRM driver for the Intel GMA500"
+
+#define PSB_DRM_DRIVER_DATE "2011-06-06"
+#define PSB_DRM_DRIVER_MAJOR 1
+#define PSB_DRM_DRIVER_MINOR 0
+#define PSB_DRM_DRIVER_PATCHLEVEL 0
+
+/*
+ *     Hardware offsets
+ */
+#define PSB_VDC_OFFSET          0x00000000
+#define PSB_VDC_SIZE            0x000080000
+#define MRST_MMIO_SIZE          0x0000C0000
+#define MDFLD_MMIO_SIZE          0x000100000
+#define PSB_SGX_SIZE            0x8000
+#define PSB_SGX_OFFSET          0x00040000
+#define MRST_SGX_OFFSET                 0x00080000
+/*
+ *     PCI resource identifiers
+ */
+#define PSB_MMIO_RESOURCE       0
+#define PSB_GATT_RESOURCE       2
+#define PSB_GTT_RESOURCE        3
+/*
+ *     PCI configuration
+ */
+#define PSB_GMCH_CTRL           0x52
+#define PSB_BSM                         0x5C
+#define _PSB_GMCH_ENABLED       0x4
+#define PSB_PGETBL_CTL          0x2020
+#define _PSB_PGETBL_ENABLED     0x00000001
+#define PSB_SGX_2D_SLAVE_PORT   0x4000
+
+/* To get rid of */
+#define PSB_TT_PRIV0_LIMIT      (256*1024*1024)
+#define PSB_TT_PRIV0_PLIMIT     (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
+
+/*
+ *     SGX side MMU definitions (these can probably go)
+ */
+
+/*
+ *     Flags for external memory type field.
+ */
+#define PSB_MMU_CACHED_MEMORY    0x0001        /* Bind to MMU only */
+#define PSB_MMU_RO_MEMORY        0x0002        /* MMU RO memory */
+#define PSB_MMU_WO_MEMORY        0x0004        /* MMU WO memory */
+/*
+ *     PTE's and PDE's
+ */
+#define PSB_PDE_MASK             0x003FFFFF
+#define PSB_PDE_SHIFT            22
+#define PSB_PTE_SHIFT            12
+/*
+ *     Cache control
+ */
+#define PSB_PTE_VALID            0x0001        /* PTE / PDE valid */
+#define PSB_PTE_WO               0x0002        /* Write only */
+#define PSB_PTE_RO               0x0004        /* Read only */
+#define PSB_PTE_CACHED           0x0008        /* CPU cache coherent */
+
+/*
+ *     VDC registers and bits
+ */
+#define PSB_MSVDX_CLOCKGATING    0x2064
+#define PSB_TOPAZ_CLOCKGATING    0x2068
+#define PSB_HWSTAM               0x2098
+#define PSB_INSTPM               0x20C0
+#define PSB_INT_IDENTITY_R        0x20A4
+#define _MDFLD_PIPEC_EVENT_FLAG   (1<<2)
+#define _MDFLD_PIPEC_VBLANK_FLAG  (1<<3)
+#define _PSB_DPST_PIPEB_FLAG      (1<<4)
+#define _MDFLD_PIPEB_EVENT_FLAG   (1<<4)
+#define _PSB_VSYNC_PIPEB_FLAG    (1<<5)
+#define _PSB_DPST_PIPEA_FLAG      (1<<6)
+#define _PSB_PIPEA_EVENT_FLAG     (1<<6)
+#define _PSB_VSYNC_PIPEA_FLAG    (1<<7)
+#define _MDFLD_MIPIA_FLAG        (1<<16)
+#define _MDFLD_MIPIC_FLAG        (1<<17)
+#define _PSB_IRQ_SGX_FLAG        (1<<18)
+#define _PSB_IRQ_MSVDX_FLAG      (1<<19)
+#define _LNC_IRQ_TOPAZ_FLAG      (1<<20)
+
+#define _PSB_PIPE_EVENT_FLAG   (_PSB_VSYNC_PIPEA_FLAG | \
+                                _PSB_VSYNC_PIPEB_FLAG)
+
+/* This flag includes all the display IRQ bits except the vblank irqs. */
+#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
+                                 _MDFLD_PIPEB_EVENT_FLAG | \
+                                 _PSB_PIPEA_EVENT_FLAG | \
+                                 _PSB_VSYNC_PIPEA_FLAG | \
+                                 _MDFLD_MIPIA_FLAG | \
+                                 _MDFLD_MIPIC_FLAG)
+#define PSB_INT_MASK_R           0x20A8
+#define PSB_INT_ENABLE_R         0x20A0
+
+#define _PSB_MMU_ER_MASK      0x0001FF00
+#define _PSB_MMU_ER_HOST      (1 << 16)
+#define GPIOA                  0x5010
+#define GPIOB                  0x5014
+#define GPIOC                  0x5018
+#define GPIOD                  0x501c
+#define GPIOE                  0x5020
+#define GPIOF                  0x5024
+#define GPIOG                  0x5028
+#define GPIOH                  0x502c
+#define GPIO_CLOCK_DIR_MASK            (1 << 0)
+#define GPIO_CLOCK_DIR_IN              (0 << 1)
+#define GPIO_CLOCK_DIR_OUT             (1 << 1)
+#define GPIO_CLOCK_VAL_MASK            (1 << 2)
+#define GPIO_CLOCK_VAL_OUT             (1 << 3)
+#define GPIO_CLOCK_VAL_IN              (1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE      (1 << 5)
+#define GPIO_DATA_DIR_MASK             (1 << 8)
+#define GPIO_DATA_DIR_IN               (0 << 9)
+#define GPIO_DATA_DIR_OUT              (1 << 9)
+#define GPIO_DATA_VAL_MASK             (1 << 10)
+#define GPIO_DATA_VAL_OUT              (1 << 11)
+#define GPIO_DATA_VAL_IN               (1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE       (1 << 13)
+
+#define VCLK_DIVISOR_VGA0   0x6000
+#define VCLK_DIVISOR_VGA1   0x6004
+#define VCLK_POST_DIV      0x6010
+
+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
+#define PSB_COMM_USER_IRQ (1024 >> 2)
+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
+#define PSB_COMM_FW (2048 >> 2)
+
+#define PSB_UIRQ_VISTEST              1
+#define PSB_UIRQ_OOM_REPLY            2
+#define PSB_UIRQ_FIRE_TA_REPLY        3
+#define PSB_UIRQ_FIRE_RASTER_REPLY     4
+
+#define PSB_2D_SIZE (256*1024*1024)
+#define PSB_MAX_RELOC_PAGES 1024
+
+#define PSB_LOW_REG_OFFS 0x0204
+#define PSB_HIGH_REG_OFFS 0x0600
+
+#define PSB_NUM_VBLANKS 2
+#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
+#define PSB_LID_DELAY (DRM_HZ / 10)
+
+#define MDFLD_PNW_B0 0x04
+#define MDFLD_PNW_C0 0x08
+
+#define MDFLD_DSR_2D_3D_0      (1 << 0)
+#define MDFLD_DSR_2D_3D_2      (1 << 1)
+#define MDFLD_DSR_CURSOR_0     (1 << 2)
+#define MDFLD_DSR_CURSOR_2     (1 << 3)
+#define MDFLD_DSR_OVERLAY_0    (1 << 4)
+#define MDFLD_DSR_OVERLAY_2    (1 << 5)
+#define MDFLD_DSR_MIPI_CONTROL (1 << 6)
+#define MDFLD_DSR_DAMAGE_MASK_0        ((1 << 0) | (1 << 2) | (1 << 4))
+#define MDFLD_DSR_DAMAGE_MASK_2        ((1 << 1) | (1 << 3) | (1 << 5))
+#define MDFLD_DSR_2D_3D        (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
+
+#define MDFLD_DSR_RR           45
+#define MDFLD_DPU_ENABLE       (1 << 31)
+#define MDFLD_DSR_FULLSCREEN   (1 << 30)
+#define MDFLD_DSR_DELAY                (DRM_HZ / MDFLD_DSR_RR)
+
+#define PSB_PWR_STATE_ON               1
+#define PSB_PWR_STATE_OFF              2
+
+#define PSB_PMPOLICY_NOPM              0
+#define PSB_PMPOLICY_CLOCKGATING       1
+#define PSB_PMPOLICY_POWERDOWN         2
+
+#define PSB_PMSTATE_POWERUP            0
+#define PSB_PMSTATE_CLOCKGATED         1
+#define PSB_PMSTATE_POWERDOWN          2
+#define PSB_PCIx_MSI_ADDR_LOC          0x94
+#define PSB_PCIx_MSI_DATA_LOC          0x98
+
+/* Medfield crystal settings */
+#define KSEL_CRYSTAL_19 1
+#define KSEL_BYPASS_19 5
+#define KSEL_BYPASS_25 6
+#define KSEL_BYPASS_83_100 7
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct psb_intel_opregion {
+       struct opregion_header *header;
+       struct opregion_acpi *acpi;
+       struct opregion_swsci *swsci;
+       struct opregion_asle *asle;
+       int enabled;
+};
+
+struct sdvo_device_mapping {
+       u8 initialized;
+       u8 dvo_port;
+       u8 slave_addr;
+       u8 dvo_wiring;
+       u8 i2c_pin;
+       u8 i2c_speed;
+       u8 ddc_pin;
+};
+
+struct intel_gmbus {
+       struct i2c_adapter adapter;
+       struct i2c_adapter *force_bit;
+       u32 reg0;
+};
+
+struct psb_ops;
+
+#define PSB_NUM_PIPE           3
+
+struct drm_psb_private {
+       struct drm_device *dev;
+       const struct psb_ops *ops;
+
+       struct psb_gtt gtt;
+
+       /* GTT Memory manager */
+       struct psb_gtt_mm *gtt_mm;
+       struct page *scratch_page;
+       u32 *gtt_map;
+       uint32_t stolen_base;
+       void *vram_addr;
+       unsigned long vram_stolen_size;
+       int gtt_initialized;
+       u16 gmch_ctrl;          /* Saved GTT setup */
+       u32 pge_ctl;
+
+       struct mutex gtt_mutex;
+       struct resource *gtt_mem;       /* Our PCI resource */
+
+       struct psb_mmu_driver *mmu;
+       struct psb_mmu_pd *pf_pd;
+
+       /*
+        * Register base
+        */
+
+       uint8_t *sgx_reg;
+       uint8_t *vdc_reg;
+       uint32_t gatt_free_offset;
+
+       /*
+        * Fencing / irq.
+        */
+
+       uint32_t vdc_irq_mask;
+       uint32_t pipestat[PSB_NUM_PIPE];
+
+       spinlock_t irqmask_lock;
+
+       /*
+        * Power
+        */
+
+       bool suspended;
+       bool display_power;
+       int display_count;
+
+       /*
+        * Modesetting
+        */
+       struct psb_intel_mode_device mode_dev;
+
+       struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
+       struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
+       uint32_t num_pipe;
+
+       /*
+        * OSPM info (Power management base) (can go ?)
+        */
+       uint32_t ospm_base;
+
+       /*
+        * Sizes info
+        */
+
+       u32 fuse_reg_value;
+       u32 video_device_fuse;
+
+       /* PCI revision ID for B0:D2:F0 */
+       uint8_t platform_rev_id;
+
+       /* gmbus */
+       struct intel_gmbus *gmbus;
+
+       /* Used by SDVO */
+       int crt_ddc_pin;
+       /* FIXME: The mappings should be parsed from bios but for now we can
+                 pretend there are no mappings available */
+       struct sdvo_device_mapping sdvo_mappings[2];
+       u32 hotplug_supported_mask;
+       struct drm_property *broadcast_rgb_property;
+       struct drm_property *force_audio_property;
+
+       /*
+        * LVDS info
+        */
+       int backlight_duty_cycle;       /* restore backlight to this value */
+       bool panel_wants_dither;
+       struct drm_display_mode *panel_fixed_mode;
+       struct drm_display_mode *lfp_lvds_vbt_mode;
+       struct drm_display_mode *sdvo_lvds_vbt_mode;
+
+       struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
+       struct psb_intel_i2c_chan *lvds_i2c_bus; /* FIXME: Remove this? */
+
+       /* Feature bits from the VBIOS */
+       unsigned int int_tv_support:1;
+       unsigned int lvds_dither:1;
+       unsigned int lvds_vbt:1;
+       unsigned int int_crt_support:1;
+       unsigned int lvds_use_ssc:1;
+       int lvds_ssc_freq;
+       bool is_lvds_on;
+       bool is_mipi_on;
+       u32 mipi_ctrl_display;
+
+       unsigned int core_freq;
+       uint32_t iLVDS_enable;
+
+       /* Runtime PM state */
+       int rpm_enabled;
+
+       /* MID specific */
+       struct oaktrail_vbt vbt_data;
+       struct oaktrail_gct_data gct_data;
+
+       /* MIPI Panel type etc */
+       int panel_id;
+       bool dual_mipi;         /* dual display - DPI & DBI */
+       bool dpi_panel_on;      /* The DPI panel power is on */
+       bool dpi_panel_on2;     /* The DPI panel power is on */
+       bool dbi_panel_on;      /* The DBI panel power is on */
+       bool dbi_panel_on2;     /* The DBI panel power is on */
+       u32 dsr_fb_update;      /* DSR FB update counter */
+
+       /* Moorestown HDMI state */
+       struct oaktrail_hdmi_dev *hdmi_priv;
+
+       /* Moorestown pipe config register value cache */
+       uint32_t pipeconf;
+       uint32_t pipeconf1;
+       uint32_t pipeconf2;
+
+       /* Moorestown plane control register value cache */
+       uint32_t dspcntr;
+       uint32_t dspcntr1;
+       uint32_t dspcntr2;
+
+       /* Moorestown MM backlight cache */
+       uint8_t saveBKLTCNT;
+       uint8_t saveBKLTREQ;
+       uint8_t saveBKLTBRTL;
+
+       /*
+        * Register state
+        */
+       uint32_t saveDSPACNTR;
+       uint32_t saveDSPBCNTR;
+       uint32_t savePIPEACONF;
+       uint32_t savePIPEBCONF;
+       uint32_t savePIPEASRC;
+       uint32_t savePIPEBSRC;
+       uint32_t saveFPA0;
+       uint32_t saveFPA1;
+       uint32_t saveDPLL_A;
+       uint32_t saveDPLL_A_MD;
+       uint32_t saveHTOTAL_A;
+       uint32_t saveHBLANK_A;
+       uint32_t saveHSYNC_A;
+       uint32_t saveVTOTAL_A;
+       uint32_t saveVBLANK_A;
+       uint32_t saveVSYNC_A;
+       uint32_t saveDSPASTRIDE;
+       uint32_t saveDSPASIZE;
+       uint32_t saveDSPAPOS;
+       uint32_t saveDSPABASE;
+       uint32_t saveDSPASURF;
+       uint32_t saveDSPASTATUS;
+       uint32_t saveFPB0;
+       uint32_t saveFPB1;
+       uint32_t saveDPLL_B;
+       uint32_t saveDPLL_B_MD;
+       uint32_t saveHTOTAL_B;
+       uint32_t saveHBLANK_B;
+       uint32_t saveHSYNC_B;
+       uint32_t saveVTOTAL_B;
+       uint32_t saveVBLANK_B;
+       uint32_t saveVSYNC_B;
+       uint32_t saveDSPBSTRIDE;
+       uint32_t saveDSPBSIZE;
+       uint32_t saveDSPBPOS;
+       uint32_t saveDSPBBASE;
+       uint32_t saveDSPBSURF;
+       uint32_t saveDSPBSTATUS;
+       uint32_t saveVCLK_DIVISOR_VGA0;
+       uint32_t saveVCLK_DIVISOR_VGA1;
+       uint32_t saveVCLK_POST_DIV;
+       uint32_t saveVGACNTRL;
+       uint32_t saveADPA;
+       uint32_t saveLVDS;
+       uint32_t saveDVOA;
+       uint32_t saveDVOB;
+       uint32_t saveDVOC;
+       uint32_t savePP_ON;
+       uint32_t savePP_OFF;
+       uint32_t savePP_CONTROL;
+       uint32_t savePP_CYCLE;
+       uint32_t savePFIT_CONTROL;
+       uint32_t savePaletteA[256];
+       uint32_t savePaletteB[256];
+       uint32_t saveBLC_PWM_CTL2;
+       uint32_t saveBLC_PWM_CTL;
+       uint32_t saveCLOCKGATING;
+       uint32_t saveDSPARB;
+       uint32_t saveDSPATILEOFF;
+       uint32_t saveDSPBTILEOFF;
+       uint32_t saveDSPAADDR;
+       uint32_t saveDSPBADDR;
+       uint32_t savePFIT_AUTO_RATIOS;
+       uint32_t savePFIT_PGM_RATIOS;
+       uint32_t savePP_ON_DELAYS;
+       uint32_t savePP_OFF_DELAYS;
+       uint32_t savePP_DIVISOR;
+       uint32_t saveBSM;
+       uint32_t saveVBT;
+       uint32_t saveBCLRPAT_A;
+       uint32_t saveBCLRPAT_B;
+       uint32_t saveDSPALINOFF;
+       uint32_t saveDSPBLINOFF;
+       uint32_t savePERF_MODE;
+       uint32_t saveDSPFW1;
+       uint32_t saveDSPFW2;
+       uint32_t saveDSPFW3;
+       uint32_t saveDSPFW4;
+       uint32_t saveDSPFW5;
+       uint32_t saveDSPFW6;
+       uint32_t saveCHICKENBIT;
+       uint32_t saveDSPACURSOR_CTRL;
+       uint32_t saveDSPBCURSOR_CTRL;
+       uint32_t saveDSPACURSOR_BASE;
+       uint32_t saveDSPBCURSOR_BASE;
+       uint32_t saveDSPACURSOR_POS;
+       uint32_t saveDSPBCURSOR_POS;
+       uint32_t save_palette_a[256];
+       uint32_t save_palette_b[256];
+       uint32_t saveOV_OVADD;
+       uint32_t saveOV_OGAMC0;
+       uint32_t saveOV_OGAMC1;
+       uint32_t saveOV_OGAMC2;
+       uint32_t saveOV_OGAMC3;
+       uint32_t saveOV_OGAMC4;
+       uint32_t saveOV_OGAMC5;
+       uint32_t saveOVC_OVADD;
+       uint32_t saveOVC_OGAMC0;
+       uint32_t saveOVC_OGAMC1;
+       uint32_t saveOVC_OGAMC2;
+       uint32_t saveOVC_OGAMC3;
+       uint32_t saveOVC_OGAMC4;
+       uint32_t saveOVC_OGAMC5;
+
+       /* MSI reg save */
+       uint32_t msi_addr;
+       uint32_t msi_data;
+
+       /* Medfield specific register save state */
+       uint32_t saveHDMIPHYMISCCTL;
+       uint32_t saveHDMIB_CONTROL;
+       uint32_t saveDSPCCNTR;
+       uint32_t savePIPECCONF;
+       uint32_t savePIPECSRC;
+       uint32_t saveHTOTAL_C;
+       uint32_t saveHBLANK_C;
+       uint32_t saveHSYNC_C;
+       uint32_t saveVTOTAL_C;
+       uint32_t saveVBLANK_C;
+       uint32_t saveVSYNC_C;
+       uint32_t saveDSPCSTRIDE;
+       uint32_t saveDSPCSIZE;
+       uint32_t saveDSPCPOS;
+       uint32_t saveDSPCSURF;
+       uint32_t saveDSPCSTATUS;
+       uint32_t saveDSPCLINOFF;
+       uint32_t saveDSPCTILEOFF;
+       uint32_t saveDSPCCURSOR_CTRL;
+       uint32_t saveDSPCCURSOR_BASE;
+       uint32_t saveDSPCCURSOR_POS;
+       uint32_t save_palette_c[256];
+       uint32_t saveOV_OVADD_C;
+       uint32_t saveOV_OGAMC0_C;
+       uint32_t saveOV_OGAMC1_C;
+       uint32_t saveOV_OGAMC2_C;
+       uint32_t saveOV_OGAMC3_C;
+       uint32_t saveOV_OGAMC4_C;
+       uint32_t saveOV_OGAMC5_C;
+
+       /* DSI register save */
+       uint32_t saveDEVICE_READY_REG;
+       uint32_t saveINTR_EN_REG;
+       uint32_t saveDSI_FUNC_PRG_REG;
+       uint32_t saveHS_TX_TIMEOUT_REG;
+       uint32_t saveLP_RX_TIMEOUT_REG;
+       uint32_t saveTURN_AROUND_TIMEOUT_REG;
+       uint32_t saveDEVICE_RESET_REG;
+       uint32_t saveDPI_RESOLUTION_REG;
+       uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
+       uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
+       uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
+       uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
+       uint32_t saveVERT_SYNC_PAD_COUNT_REG;
+       uint32_t saveVERT_BACK_PORCH_COUNT_REG;
+       uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
+       uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
+       uint32_t saveINIT_COUNT_REG;
+       uint32_t saveMAX_RET_PAK_REG;
+       uint32_t saveVIDEO_FMT_REG;
+       uint32_t saveEOT_DISABLE_REG;
+       uint32_t saveLP_BYTECLK_REG;
+       uint32_t saveHS_LS_DBI_ENABLE_REG;
+       uint32_t saveTXCLKESC_REG;
+       uint32_t saveDPHY_PARAM_REG;
+       uint32_t saveMIPI_CONTROL_REG;
+       uint32_t saveMIPI;
+       uint32_t saveMIPI_C;
+
+       /* DPST register save */
+       uint32_t saveHISTOGRAM_INT_CONTROL_REG;
+       uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
+       uint32_t savePWM_CONTROL_LOGIC;
+
+       /*
+        * DSI info.
+        */
+       void *dbi_dsr_info;
+       void *dbi_dpu_info;
+       void *dsi_configs[2];
+       /*
+        * LID-Switch
+        */
+       spinlock_t lid_lock;
+       struct timer_list lid_timer;
+       struct psb_intel_opregion opregion;
+       u32 *lid_state;
+       u32 lid_last_state;
+
+       /*
+        * Watchdog
+        */
+
+       uint32_t apm_reg;
+       uint16_t apm_base;
+
+       /*
+        * Used for modifying backlight from
+        * xrandr -- consider removing and using HAL instead
+        */
+       struct backlight_device *backlight_device;
+       struct drm_property *backlight_property;
+       uint32_t blc_adj1;
+       uint32_t blc_adj2;
+
+       void *fbdev;
+
+       /* 2D acceleration */
+       spinlock_t lock_2d;
+};
+
+
+/*
+ *     Operations for each board type
+ */
+struct psb_ops {
+       const char *name;
+       unsigned int accel_2d:1;
+       int pipes;              /* Number of output pipes */
+       int crtcs;              /* Number of CRTCs */
+       int sgx_offset;         /* Base offset of SGX device */
+
+       /* Sub functions */
+       struct drm_crtc_helper_funcs const *crtc_helper;
+       struct drm_crtc_funcs const *crtc_funcs;
+
+       /* Setup hooks */
+       int (*chip_setup)(struct drm_device *dev);
+       void (*chip_teardown)(struct drm_device *dev);
+
+       /* Display management hooks */
+       int (*output_init)(struct drm_device *dev);
+       /* Power management hooks */
+       void (*init_pm)(struct drm_device *dev);
+       int (*save_regs)(struct drm_device *dev);
+       int (*restore_regs)(struct drm_device *dev);
+       int (*power_up)(struct drm_device *dev);
+       int (*power_down)(struct drm_device *dev);
+
+       void (*lvds_bl_power)(struct drm_device *dev, bool on);
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+       /* Backlight */
+       int (*backlight_init)(struct drm_device *dev);
+#endif
+       int i2c_bus;            /* I2C bus identifier for Moorestown */
+};
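+
+/*
+ * Illustrative sketch only, with hypothetical names: a board is described
+ * by filling in one of these tables.  example_chip_setup and
+ * example_output_init below are placeholders; the real tables are
+ * psb_chip_ops, oaktrail_chip_ops and cdv_chip_ops declared further down.
+ *
+ *     static const struct psb_ops example_chip_ops = {
+ *             .name = "Example",
+ *             .accel_2d = 1,
+ *             .pipes = 2,
+ *             .crtcs = 2,
+ *             .sgx_offset = PSB_SGX_OFFSET,
+ *             .chip_setup = example_chip_setup,
+ *             .output_init = example_output_init,
+ *             .crtc_helper = &psb_intel_helper_funcs,
+ *             .crtc_funcs = &psb_intel_crtc_funcs,
+ *     };
+ */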
+
+
+
+struct psb_mmu_driver;
+
+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
+extern int drm_pick_crtcs(struct drm_device *dev);
+
+static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
+{
+       return (struct drm_psb_private *) dev->dev_private;
+}
+
+/*
+ * MMU stuff.
+ */
+
+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+                                       int trap_pagefaults,
+                                       int invalid_type,
+                                       struct drm_psb_private *dev_priv);
+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
+                                                *driver);
+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
+                              uint32_t gtt_start, uint32_t gtt_pages);
+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+                                          int trap_pagefaults,
+                                          int invalid_type);
+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
+extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+                                       unsigned long address,
+                                       uint32_t num_pages);
+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
+                                      uint32_t start_pfn,
+                                      unsigned long address,
+                                      uint32_t num_pages, int type);
+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+                                 unsigned long *pfn);
+
+/*
+ * Enable / disable MMU for different requestors.
+ */
+
+
+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+                               unsigned long address, uint32_t num_pages,
+                               uint32_t desired_tile_stride,
+                               uint32_t hw_tile_stride, int type);
+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
+                                unsigned long address, uint32_t num_pages,
+                                uint32_t desired_tile_stride,
+                                uint32_t hw_tile_stride);
+/*
+ *psb_irq.c
+ */
+
+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+extern int psb_irq_enable_dpst(struct drm_device *dev);
+extern int psb_irq_disable_dpst(struct drm_device *dev);
+extern void psb_irq_preinstall(struct drm_device *dev);
+extern int psb_irq_postinstall(struct drm_device *dev);
+extern void psb_irq_uninstall(struct drm_device *dev);
+extern void psb_irq_turn_on_dpst(struct drm_device *dev);
+extern void psb_irq_turn_off_dpst(struct drm_device *dev);
+
+extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int psb_enable_vblank(struct drm_device *dev, int crtc);
+extern void psb_disable_vblank(struct drm_device *dev, int crtc);
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
+
+/*
+ * intel_opregion.c
+ */
+extern int gma_intel_opregion_init(struct drm_device *dev);
+extern int gma_intel_opregion_exit(struct drm_device *dev);
+
+/*
+ * framebuffer.c
+ */
+extern int psbfb_probed(struct drm_device *dev);
+extern int psbfb_remove(struct drm_device *dev,
+                       struct drm_framebuffer *fb);
+/*
+ * accel_2d.c
+ */
+extern void psbfb_copyarea(struct fb_info *info,
+                                       const struct fb_copyarea *region);
+extern int psbfb_sync(struct fb_info *info);
+extern void psb_spank(struct drm_psb_private *dev_priv);
+
+/*
+ * psb_reset.c
+ */
+
+extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
+extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
+
+/* modesetting */
+extern void psb_modeset_init(struct drm_device *dev);
+extern void psb_modeset_cleanup(struct drm_device *dev);
+extern int psb_fbdev_init(struct drm_device *dev);
+
+/* backlight.c */
+int gma_backlight_init(struct drm_device *dev);
+void gma_backlight_exit(struct drm_device *dev);
+
+/* oaktrail_crtc.c */
+extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
+
+/* oaktrail_lvds.c */
+extern void oaktrail_lvds_init(struct drm_device *dev,
+                   struct psb_intel_mode_device *mode_dev);
+
+/* psb_intel_display.c */
+extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
+extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
+
+/* psb_intel_lvds.c */
+extern const struct drm_connector_helper_funcs
+                                       psb_intel_lvds_connector_helper_funcs;
+extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
+
+/* gem.c */
+extern int psb_gem_init_object(struct drm_gem_object *obj);
+extern void psb_gem_free_object(struct drm_gem_object *obj);
+extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
+                       struct drm_file *file);
+extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+                       struct drm_mode_create_dumb *args);
+extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+                       uint32_t handle);
+extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+                       uint32_t handle, uint64_t *offset);
+extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file);
+extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+                                       struct drm_file *file);
+
+/* psb_device.c */
+extern const struct psb_ops psb_chip_ops;
+
+/* oaktrail_device.c */
+extern const struct psb_ops oaktrail_chip_ops;
+
+/* cdv_device.c */
+extern const struct psb_ops cdv_chip_ops;
+
+/*
+ * Debug print bits setting
+ */
+#define PSB_D_GENERAL (1 << 0)
+#define PSB_D_INIT    (1 << 1)
+#define PSB_D_IRQ     (1 << 2)
+#define PSB_D_ENTRY   (1 << 3)
+/* debug the get H/V BP/FP count */
+#define PSB_D_HV      (1 << 4)
+#define PSB_D_DBI_BF  (1 << 5)
+#define PSB_D_PM      (1 << 6)
+#define PSB_D_RENDER  (1 << 7)
+#define PSB_D_REG     (1 << 8)
+#define PSB_D_MSVDX   (1 << 9)
+#define PSB_D_TOPAZ   (1 << 10)
+
+extern int drm_psb_no_fb;
+extern int drm_idle_check_interval;
+
+/*
+ *     Utilities
+ */
+
+static inline u32 MRST_MSG_READ32(uint port, uint offset)
+{
+       int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
+       uint32_t ret_val = 0;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       pci_write_config_dword(pci_root, 0xD0, mcr);
+       pci_read_config_dword(pci_root, 0xD4, &ret_val);
+       pci_dev_put(pci_root);
+       return ret_val;
+}
+static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+       int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       pci_write_config_dword(pci_root, 0xD4, value);
+       pci_write_config_dword(pci_root, 0xD0, mcr);
+       pci_dev_put(pci_root);
+}
+static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
+{
+       int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+       uint32_t ret_val = 0;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       pci_write_config_dword(pci_root, 0xD0, mcr);
+       pci_read_config_dword(pci_root, 0xD4, &ret_val);
+       pci_dev_put(pci_root);
+       return ret_val;
+}
+static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+       int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+       struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+       pci_write_config_dword(pci_root, 0xD4, value);
+       pci_write_config_dword(pci_root, 0xD0, mcr);
+       pci_dev_put(pci_root);
+}
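+
+/*
+ * Usage sketch, not taken from the driver: the helpers above issue an
+ * indirect register access through the bus 0 / device 0 PCI config space,
+ * with the command word written to offset 0xD0 and the data carried in
+ * offset 0xD4.  A read-modify-write would look roughly like this, where
+ * port 0x02 and offset 0x30 are placeholder values rather than documented
+ * addresses:
+ *
+ *     u32 v = MRST_MSG_READ32(0x02, 0x30);
+ *     MRST_MSG_WRITE32(0x02, 0x30, v | 1);
+ */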
+
+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       return ioread32(dev_priv->vdc_reg + reg);
+}
+
+#define REG_READ(reg)         REGISTER_READ(dev, (reg))
+
+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
+                                     uint32_t val)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       iowrite32((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE(reg, val)    REGISTER_WRITE(dev, (reg), (val))
+
+static inline void REGISTER_WRITE16(struct drm_device *dev,
+                                       uint32_t reg, uint32_t val)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       iowrite16((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE16(reg, val)    REGISTER_WRITE16(dev, (reg), (val))
+
+static inline void REGISTER_WRITE8(struct drm_device *dev,
+                                      uint32_t reg, uint32_t val)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       iowrite8((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE8(reg, val)           REGISTER_WRITE8(dev, (reg), (val))
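+
+/*
+ * Usage note, added for illustration: with a struct drm_device *dev in
+ * scope these wrappers give the usual read-modify-write pattern, e.g.
+ * (PIPEACONF and PIPEACONF_ENABLE are register names used elsewhere in
+ * this series, shown here purely as an example):
+ *
+ *     u32 tmp = REG_READ(PIPEACONF);
+ *     REG_WRITE(PIPEACONF, tmp | PIPEACONF_ENABLE);
+ */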
+
+#define PSB_WVDC32(_val, _offs)                iowrite32(_val, dev_priv->vdc_reg + (_offs))
+#define PSB_RVDC32(_offs)              ioread32(dev_priv->vdc_reg + (_offs))
+
+/* #define TRAP_SGX_PM_FAULT 1 */
+#ifdef TRAP_SGX_PM_FAULT
+#define PSB_RSGX32(_offs)                                              \
+({                                                                     \
+       if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) {              \
+               printk(KERN_ERR                                         \
+                       "access sgx when it's off!! (READ) %s, %d\n",   \
+              __FILE__, __LINE__);                                     \
+               mdelay(1000);                                           \
+       }                                                               \
+       ioread32(dev_priv->sgx_reg + (_offs));                          \
+})
+#else
+#define PSB_RSGX32(_offs)              ioread32(dev_priv->sgx_reg + (_offs))
+#endif
+#define PSB_WSGX32(_val, _offs)                iowrite32(_val, dev_priv->sgx_reg + (_offs))
+
+#define MSVDX_REG_DUMP 0
+
+#define PSB_WMSVDX32(_val, _offs)      iowrite32(_val, dev_priv->msvdx_reg + (_offs))
+#define PSB_RMSVDX32(_offs)            ioread32(dev_priv->msvdx_reg + (_offs))
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
new file mode 100644 (file)
index 0000000..49e9835
--- /dev/null
@@ -0,0 +1,1446 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+
+struct psb_intel_clock_t {
+       /* given values */
+       int n;
+       int m1, m2;
+       int p1, p2;
+       /* derived values */
+       int dot;
+       int vco;
+       int m;
+       int p;
+};
+
+struct psb_intel_range_t {
+       int min, max;
+};
+
+struct psb_intel_p2_t {
+       int dot_limit;
+       int p2_slow, p2_fast;
+};
+
+#define INTEL_P2_NUM                 2
+
+struct psb_intel_limit_t {
+       struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+       struct psb_intel_p2_t p2;
+};
+
+#define I8XX_DOT_MIN             25000
+#define I8XX_DOT_MAX            350000
+#define I8XX_VCO_MIN            930000
+#define I8XX_VCO_MAX           1400000
+#define I8XX_N_MIN                   3
+#define I8XX_N_MAX                  16
+#define I8XX_M_MIN                  96
+#define I8XX_M_MAX                 140
+#define I8XX_M1_MIN                 18
+#define I8XX_M1_MAX                 26
+#define I8XX_M2_MIN                  6
+#define I8XX_M2_MAX                 16
+#define I8XX_P_MIN                   4
+#define I8XX_P_MAX                 128
+#define I8XX_P1_MIN                  2
+#define I8XX_P1_MAX                 33
+#define I8XX_P1_LVDS_MIN             1
+#define I8XX_P1_LVDS_MAX             6
+#define I8XX_P2_SLOW                 4
+#define I8XX_P2_FAST                 2
+#define I8XX_P2_LVDS_SLOW            14
+#define I8XX_P2_LVDS_FAST            14        /* No fast option */
+#define I8XX_P2_SLOW_LIMIT      165000
+
+#define I9XX_DOT_MIN             20000
+#define I9XX_DOT_MAX            400000
+#define I9XX_VCO_MIN           1400000
+#define I9XX_VCO_MAX           2800000
+#define I9XX_N_MIN                   3
+#define I9XX_N_MAX                   8
+#define I9XX_M_MIN                  70
+#define I9XX_M_MAX                 120
+#define I9XX_M1_MIN                 10
+#define I9XX_M1_MAX                 20
+#define I9XX_M2_MIN                  5
+#define I9XX_M2_MAX                  9
+#define I9XX_P_SDVO_DAC_MIN          5
+#define I9XX_P_SDVO_DAC_MAX         80
+#define I9XX_P_LVDS_MIN                      7
+#define I9XX_P_LVDS_MAX                     98
+#define I9XX_P1_MIN                  1
+#define I9XX_P1_MAX                  8
+#define I9XX_P2_SDVO_DAC_SLOW               10
+#define I9XX_P2_SDVO_DAC_FAST                5
+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT     200000
+#define I9XX_P2_LVDS_SLOW                   14
+#define I9XX_P2_LVDS_FAST                    7
+#define I9XX_P2_LVDS_SLOW_LIMIT                 112000
+
+#define INTEL_LIMIT_I8XX_DVO_DAC    0
+#define INTEL_LIMIT_I8XX_LVDS      1
+#define INTEL_LIMIT_I9XX_SDVO_DAC   2
+#define INTEL_LIMIT_I9XX_LVDS      3
+
+static const struct psb_intel_limit_t psb_intel_limits[] = {
+       {                       /* INTEL_LIMIT_I8XX_DVO_DAC */
+        .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
+        .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
+        .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
+        .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
+        .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
+        .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
+        .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
+        .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
+        .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
+               .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
+        },
+       {                       /* INTEL_LIMIT_I8XX_LVDS */
+        .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
+        .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
+        .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
+        .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
+        .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
+        .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
+        .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
+        .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
+        .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
+               .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
+        },
+       {                       /* INTEL_LIMIT_I9XX_SDVO_DAC */
+        .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
+        .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
+        .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
+        .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
+        .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
+        .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
+        .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
+        .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
+        .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+               .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
+               I9XX_P2_SDVO_DAC_FAST},
+        },
+       {                       /* INTEL_LIMIT_I9XX_LVDS */
+        .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
+        .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
+        .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
+        .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
+        .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
+        .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
+        .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
+        .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
+        /* The single-channel range is 25-112MHz, and dual-channel
+         * is 80-224MHz.  Prefer single channel as much as possible.
+         */
+        .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+               .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
+        },
+};
+
+static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
+{
+       const struct psb_intel_limit_t *limit;
+
+       if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+               limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
+       else
+               limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+       return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
+{
+       clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = refclk * clock->m / (clock->n + 2);
+       clock->dot = clock->vco / clock->p;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
+
+static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
+{
+       clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = refclk * clock->m / (clock->n + 2);
+       clock->dot = clock->vco / clock->p;
+}
+
+static void psb_intel_clock(struct drm_device *dev, int refclk,
+                       struct psb_intel_clock_t *clock)
+{
+       i9xx_clock(refclk, clock);
+}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *l_entry;
+
+       list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+               if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+                       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(l_entry);
+                       if (psb_intel_encoder->type == type)
+                               return true;
+               }
+       }
+       return false;
+}
+
+#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+/**
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+
+static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
+                              struct psb_intel_clock_t *clock)
+{
+       const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
+
+       if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+               INTELPllInvalid("p1 out of range\n");
+       if (clock->p < limit->p.min || limit->p.max < clock->p)
+               INTELPllInvalid("p out of range\n");
+       if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+               INTELPllInvalid("m2 out of range\n");
+       if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+               INTELPllInvalid("m1 out of range\n");
+       if (clock->m1 <= clock->m2)
+               INTELPllInvalid("m1 <= m2\n");
+       if (clock->m < limit->m.min || limit->m.max < clock->m)
+               INTELPllInvalid("m out of range\n");
+       if (clock->n < limit->n.min || limit->n.max < clock->n)
+               INTELPllInvalid("n out of range\n");
+       if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+               INTELPllInvalid("vco out of range\n");
+       /* XXX: We may need to be checking "Dot clock"
+        * depending on the multiplier, connector, etc.,
+        * rather than just a single range.
+        */
+       if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+               INTELPllInvalid("dot out of range\n");
+
+       return true;
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
+                               int refclk,
+                               struct psb_intel_clock_t *best_clock)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_clock_t clock;
+       const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
+       int err = target;
+
+       if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+           (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+               /*
+                * For LVDS, if the panel is on, just rely on its current
+                * settings for dual-channel.  We haven't figured out how to
+                * reliably set up different single/dual channel state, if we
+                * even can.
+                */
+               if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+                   LVDS_CLKB_POWER_UP)
+                       clock.p2 = limit->p2.p2_fast;
+               else
+                       clock.p2 = limit->p2.p2_slow;
+       } else {
+               if (target < limit->p2.dot_limit)
+                       clock.p2 = limit->p2.p2_slow;
+               else
+                       clock.p2 = limit->p2.p2_fast;
+       }
+
+       memset(best_clock, 0, sizeof(*best_clock));
+
+       for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+            clock.m1++) {
+               for (clock.m2 = limit->m2.min;
+                    clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
+                    clock.m2++) {
+                       for (clock.n = limit->n.min;
+                            clock.n <= limit->n.max; clock.n++) {
+                               for (clock.p1 = limit->p1.min;
+                                    clock.p1 <= limit->p1.max;
+                                    clock.p1++) {
+                                       int this_err;
+
+                                       psb_intel_clock(dev, refclk, &clock);
+
+                                       if (!psb_intel_PLL_is_valid
+                                           (crtc, &clock))
+                                               continue;
+
+                                       this_err = abs(clock.dot - target);
+                                       if (this_err < err) {
+                                               *best_clock = clock;
+                                               err = this_err;
+                                       }
+                               }
+                       }
+               }
+       }
+
+       return err != target;
+}
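+
+/*
+ * Worked example, added for illustration only: with refclk = 96000 kHz and
+ * divisors n = 3, m1 = 14, m2 = 8, p1 = 2, p2 = 10, the equation above
+ * gives m = 5 * (14 + 2) + (8 + 2) = 90, vco = 96000 * 90 / (3 + 2) =
+ * 1728000 kHz and dot = 1728000 / (2 * 10) = 86400 kHz, all inside the
+ * I9XX_* limits, so an 86.4 MHz pixel clock is reachable with these values.
+ */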
+
+void psb_intel_wait_for_vblank(struct drm_device *dev)
+{
+       /* Wait for 20ms, i.e. one cycle at 50Hz. */
+       mdelay(20);
+}
+
+int psb_intel_pipe_set_base(struct drm_crtc *crtc,
+                           int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       /* struct drm_i915_master_private *master_priv; */
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+       int pipe = psb_intel_crtc->pipe;
+       unsigned long start, offset;
+       int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+       int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+       int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       u32 dspcntr;
+       int ret = 0;
+
+       if (!gma_power_begin(dev, true))
+               return 0;
+
+       /* no fb bound */
+       if (!crtc->fb) {
+               dev_dbg(dev->dev, "No FB bound\n");
+               goto psb_intel_pipe_cleaner;
+       }
+
+       /* We are displaying this buffer, make sure it is actually loaded
+          into the GTT */
+       ret = psb_gtt_pin(psbfb->gtt);
+       if (ret < 0)
+               goto psb_intel_pipe_set_base_exit;
+       start = psbfb->gtt->offset;
+
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
+
+       dspcntr = REG_READ(dspcntr_reg);
+       dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+       switch (crtc->fb->bits_per_pixel) {
+       case 8:
+               dspcntr |= DISPPLANE_8BPP;
+               break;
+       case 16:
+               if (crtc->fb->depth == 15)
+                       dspcntr |= DISPPLANE_15_16BPP;
+               else
+                       dspcntr |= DISPPLANE_16BPP;
+               break;
+       case 24:
+       case 32:
+               dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+               break;
+       default:
+               dev_err(dev->dev, "Unknown color depth\n");
+               ret = -EINVAL;
+               psb_gtt_unpin(psbfb->gtt);
+               goto psb_intel_pipe_set_base_exit;
+       }
+       REG_WRITE(dspcntr_reg, dspcntr);
+
+
+       if (0 /* FIXMEAC - check what PSB needs */) {
+               REG_WRITE(dspbase, offset);
+               REG_READ(dspbase);
+               REG_WRITE(dspsurf, start);
+               REG_READ(dspsurf);
+       } else {
+               REG_WRITE(dspbase, start + offset);
+               REG_READ(dspbase);
+       }
+
+psb_intel_pipe_cleaner:
+       /* If there was a previous display we can now unpin it */
+       if (old_fb)
+               psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+psb_intel_pipe_set_base_exit:
+       gma_power_end(dev);
+       return ret;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       /* struct drm_i915_master_private *master_priv; */
+       /* struct drm_i915_private *dev_priv = dev->dev_private; */
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       u32 temp;
+       bool enabled;
+
+       /* XXX: When our outputs are all unaware of DPMS modes other than off
+        * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+        */
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+               /* Enable the DPLL */
+               temp = REG_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) == 0) {
+                       REG_WRITE(dpll_reg, temp);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+               }
+
+               /* Enable the pipe */
+               temp = REG_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) == 0)
+                       REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+               /* Enable the plane */
+               temp = REG_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+                       REG_WRITE(dspcntr_reg,
+                                 temp | DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+               }
+
+               psb_intel_crtc_load_lut(crtc);
+
+               /* Give the overlay scaler a chance to enable
+                * if it's on this pipe */
+               /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+               break;
+       case DRM_MODE_DPMS_OFF:
+               /* Give the overlay scaler a chance to disable
+                * if it's on this pipe */
+               /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+               /* Disable the VGA plane that we never use */
+               REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+               /* Disable display plane */
+               temp = REG_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+                       REG_WRITE(dspcntr_reg,
+                                 temp & ~DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+                       REG_READ(dspbase_reg);
+               }
+
+               /* Next, disable display pipes */
+               temp = REG_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+                       REG_READ(pipeconf_reg);
+               }
+
+               /* Wait for vblank for the disable to take effect. */
+               psb_intel_wait_for_vblank(dev);
+
+               temp = REG_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) != 0) {
+                       REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+                       REG_READ(dpll_reg);
+               }
+
+               /* Wait for the clocks to turn off. */
+               udelay(150);
+               break;
+       }
+
+       enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+       /*Set FIFO Watermarks*/
+       REG_WRITE(DSPARB, 0x3F3E);
+}
+
+static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void psb_intel_crtc_commit(struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void psb_intel_encoder_prepare(struct drm_encoder *encoder)
+{
+       struct drm_encoder_helper_funcs *encoder_funcs =
+           encoder->helper_private;
+       /* lvds has its own version of prepare see psb_intel_lvds_prepare */
+       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void psb_intel_encoder_commit(struct drm_encoder *encoder)
+{
+       struct drm_encoder_helper_funcs *encoder_funcs =
+           encoder->helper_private;
+       /* lvds has its own version of commit see psb_intel_lvds_commit */
+       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+void psb_intel_encoder_destroy(struct drm_encoder *encoder)
+{
+       struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+
+       drm_encoder_cleanup(encoder);
+       kfree(intel_encoder);
+}
+
+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+       u32 pfit_control;
+
+       pfit_control = REG_READ(PFIT_CONTROL);
+
+       /* See if the panel fitter is in use */
+       if ((pfit_control & PFIT_ENABLE) == 0)
+               return -1;
+       /* Must be on PIPE 1 for PSB */
+       return 1;
+}
+
+static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode,
+                              int x, int y,
+                              struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       int pipe = psb_intel_crtc->pipe;
+       int fp_reg = (pipe == 0) ? FPA0 : FPB0;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+       int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+       int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int refclk;
+       struct psb_intel_clock_t clock;
+       u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+       bool ok, is_sdvo = false, is_dvo = false;
+       bool is_crt = false, is_lvds = false, is_tv = false;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *connector;
+
+       /* No scan out no play */
+       if (crtc->fb == NULL) {
+               crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+               return 0;
+       }
+
+       list_for_each_entry(connector, &mode_config->connector_list, head) {
+               struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+
+               if (!connector->encoder
+                   || connector->encoder->crtc != crtc)
+                       continue;
+
+               switch (psb_intel_encoder->type) {
+               case INTEL_OUTPUT_LVDS:
+                       is_lvds = true;
+                       break;
+               case INTEL_OUTPUT_SDVO:
+                       is_sdvo = true;
+                       break;
+               case INTEL_OUTPUT_DVO:
+                       is_dvo = true;
+                       break;
+               case INTEL_OUTPUT_TVOUT:
+                       is_tv = true;
+                       break;
+               case INTEL_OUTPUT_ANALOG:
+                       is_crt = true;
+                       break;
+               }
+       }
+
+       refclk = 96000;
+
+       ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+                                &clock);
+       if (!ok) {
+               dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+               return 0;
+       }
+
+       fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+
+       dpll = DPLL_VGA_MODE_DIS;
+       if (is_lvds) {
+               dpll |= DPLLB_MODE_LVDS;
+               dpll |= DPLL_DVO_HIGH_SPEED;
+       } else
+               dpll |= DPLLB_MODE_DAC_SERIAL;
+       if (is_sdvo) {
+               int sdvo_pixel_multiply =
+                           adjusted_mode->clock / mode->clock;
+               dpll |= DPLL_DVO_HIGH_SPEED;
+               dpll |=
+                   (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+       }
+
+       /* compute bitmask from p1 value */
+       dpll |= (1 << (clock.p1 - 1)) << 16;
+       switch (clock.p2) {
+       case 5:
+               dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+               break;
+       case 7:
+               dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+               break;
+       case 10:
+               dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+               break;
+       case 14:
+               dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+               break;
+       }
+
+       if (is_tv) {
+               /* XXX: just matching BIOS for now */
+/*     dpll |= PLL_REF_INPUT_TVCLKINBC; */
+               dpll |= 3;
+       }
+       dpll |= PLL_REF_INPUT_DREFCLK;
+
+       /* setup pipeconf */
+       pipeconf = REG_READ(pipeconf_reg);
+
+       /* Set up the display plane register */
+       dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+       if (pipe == 0)
+               dspcntr |= DISPPLANE_SEL_PIPE_A;
+       else
+               dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+       dspcntr |= DISPLAY_PLANE_ENABLE;
+       pipeconf |= PIPEACONF_ENABLE;
+       dpll |= DPLL_VCO_ENABLE;
+
+
+       /* Disable the panel fitter if it was on our pipe */
+       if (psb_intel_panel_fitter_pipe(dev) == pipe)
+               REG_WRITE(PFIT_CONTROL, 0);
+
+       drm_mode_debug_printmodeline(mode);
+
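+       /* Load the divisors with the VCO bit cleared first; the VCO is only
+        * enabled by the full DPLL write further down.
+        */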
+       if (dpll & DPLL_VCO_ENABLE) {
+               REG_WRITE(fp_reg, fp);
+               REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+               REG_READ(dpll_reg);
+               udelay(150);
+       }
+
+       /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+        * This is an exception to the general rule that mode_set doesn't turn
+        * things on.
+        */
+       if (is_lvds) {
+               u32 lvds = REG_READ(LVDS);
+
+               lvds &= ~LVDS_PIPEB_SELECT;
+               if (pipe == 1)
+                       lvds |= LVDS_PIPEB_SELECT;
+
+               lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+               /* Set the B0-B3 data pairs corresponding to
+                * whether we're going to
+                * set the DPLLs for dual-channel mode or not.
+                */
+               lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+               if (clock.p2 == 7)
+                       lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+
+               /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+                * appropriately here, but we need to look more
+                * thoroughly into how panels behave in the two modes.
+                */
+
+               REG_WRITE(LVDS, lvds);
+               REG_READ(LVDS);
+       }
+
+       REG_WRITE(fp_reg, fp);
+       REG_WRITE(dpll_reg, dpll);
+       REG_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150);
+
+       /* write it again -- the BIOS does, after all */
+       REG_WRITE(dpll_reg, dpll);
+
+       REG_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150);
+
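+       /* Program the pipe timing registers; the hardware takes each value
+        * minus one, with the end/total field in the upper 16 bits.
+        */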
+       REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+                 ((adjusted_mode->crtc_htotal - 1) << 16));
+       REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+                 ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+                 ((adjusted_mode->crtc_hsync_end - 1) << 16));
+       REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+                 ((adjusted_mode->crtc_vtotal - 1) << 16));
+       REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+                 ((adjusted_mode->crtc_vblank_end - 1) << 16));
+       REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+                 ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       /* pipesrc and dspsize control the size that is scaled from,
+        * which should always be the user's requested size.
+        */
+       REG_WRITE(dspsize_reg,
+                 ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+       REG_WRITE(dsppos_reg, 0);
+       REG_WRITE(pipesrc_reg,
+                 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+       REG_WRITE(pipeconf_reg, pipeconf);
+       REG_READ(pipeconf_reg);
+
+       psb_intel_wait_for_vblank(dev);
+
+       REG_WRITE(dspcntr_reg, dspcntr);
+
+       /* Flush the plane changes */
+       crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+
+       psb_intel_wait_for_vblank(dev);
+
+       return 0;
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_psb_private *dev_priv =
+                               (struct drm_psb_private *)dev->dev_private;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int palreg = PALETTE_A;
+       int i;
+
+       /* The clocks have to be on to load the palette. */
+       if (!crtc->enabled)
+               return;
+
+       switch (psb_intel_crtc->pipe) {
+       case 0:
+               break;
+       case 1:
+               palreg = PALETTE_B;
+               break;
+       case 2:
+               palreg = PALETTE_C;
+               break;
+       default:
+               dev_err(dev->dev, "Illegal Pipe Number.\n");
+               return;
+       }
+
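+       /* Write the palette directly while the device is powered, otherwise
+        * cache the values in the saved palette state.
+        */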
+       if (gma_power_begin(dev, false)) {
+               for (i = 0; i < 256; i++) {
+                       REG_WRITE(palreg + 4 * i,
+                                 ((psb_intel_crtc->lut_r[i] +
+                                 psb_intel_crtc->lut_adj[i]) << 16) |
+                                 ((psb_intel_crtc->lut_g[i] +
+                                 psb_intel_crtc->lut_adj[i]) << 8) |
+                                 (psb_intel_crtc->lut_b[i] +
+                                 psb_intel_crtc->lut_adj[i]));
+               }
+               gma_power_end(dev);
+       } else {
+               for (i = 0; i < 256; i++) {
+                       dev_priv->save_palette_a[i] =
+                                 ((psb_intel_crtc->lut_r[i] +
+                                 psb_intel_crtc->lut_adj[i]) << 16) |
+                                 ((psb_intel_crtc->lut_g[i] +
+                                 psb_intel_crtc->lut_adj[i]) << 8) |
+                                 (psb_intel_crtc->lut_b[i] +
+                                 psb_intel_crtc->lut_adj[i]);
+               }
+
+       }
+}
+
+/**
+ * Save HW states of the given crtc
+ */
+static void psb_intel_crtc_save(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       /* struct drm_psb_private *dev_priv =
+                       (struct drm_psb_private *)dev->dev_private; */
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+       int pipeA = (psb_intel_crtc->pipe == 0);
+       uint32_t paletteReg;
+       int i;
+
+       if (!crtc_state) {
+               dev_err(dev->dev, "No CRTC state found\n");
+               return;
+       }
+
+       crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+       crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+       crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+       crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+       crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+       crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+       crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+       crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+       crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+       crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+       crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+       crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+       crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+
+       /*NOTE: DSPSIZE DSPPOS only for psb*/
+       crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+       crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+
+       crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+
+       paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+       for (i = 0; i < 256; ++i)
+               crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * Restore HW states of the given crtc
+ */
+static void psb_intel_crtc_restore(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       /* struct drm_psb_private * dev_priv =
+                               (struct drm_psb_private *)dev->dev_private; */
+       struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
+       struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+       /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
+       int pipeA = (psb_intel_crtc->pipe == 0);
+       uint32_t paletteReg;
+       int i;
+
+       if (!crtc_state) {
+               dev_err(dev->dev, "No crtc state\n");
+               return;
+       }
+
+       if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+               REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+                       crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+               REG_READ(pipeA ? DPLL_A : DPLL_B);
+               udelay(150);
+       }
+
+       REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
+       REG_READ(pipeA ? FPA0 : FPB0);
+
+       REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
+       REG_READ(pipeA ? FPA1 : FPB1);
+
+       REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
+       REG_READ(pipeA ? DPLL_A : DPLL_B);
+       udelay(150);
+
+       REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
+       REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
+       REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
+       REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
+       REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
+       REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
+       REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+
+       REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
+       REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+
+       REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
+       REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+       REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+
+       psb_intel_wait_for_vblank(dev);
+
+       REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
+       REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+
+       psb_intel_wait_for_vblank(dev);
+
+       paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+       for (i = 0; i < 256; ++i)
+               REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+}
+
+static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
+                                struct drm_file *file_priv,
+                                uint32_t handle,
+                                uint32_t width, uint32_t height)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+       uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+       uint32_t temp;
+       size_t addr = 0;
+       struct gtt_range *gt;
+       struct drm_gem_object *obj;
+       int ret;
+
+       /* If we want to turn off the cursor ignore width and height */
+       if (!handle) {
+               /* turn off the cursor */
+               temp = CURSOR_MODE_DISABLE;
+
+               if (gma_power_begin(dev, false)) {
+                       REG_WRITE(control, temp);
+                       REG_WRITE(base, 0);
+                       gma_power_end(dev);
+               }
+
+               /* Unpin the old GEM object */
+               if (psb_intel_crtc->cursor_obj) {
+                       gt = container_of(psb_intel_crtc->cursor_obj,
+                                                       struct gtt_range, gem);
+                       psb_gtt_unpin(gt);
+                       drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+                       psb_intel_crtc->cursor_obj = NULL;
+               }
+
+               return 0;
+       }
+
+       /* Currently we only support 64x64 cursors */
+       if (width != 64 || height != 64) {
+               dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+               return -EINVAL;
+       }
+
+       obj = drm_gem_object_lookup(dev, file_priv, handle);
+       if (!obj)
+               return -ENOENT;
+
+       if (obj->size < width * height * 4) {
+               dev_dbg(dev->dev, "buffer is too small\n");
+               return -ENOMEM;
+       }
+
+       gt = container_of(obj, struct gtt_range, gem);
+
+       /* Pin the memory into the GTT */
+       ret = psb_gtt_pin(gt);
+       if (ret) {
+               dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+               return ret;
+       }
+
+
+       addr = gt->offset;      /* Or resource.start ??? */
+
+       psb_intel_crtc->cursor_addr = addr;
+
+       temp = 0;
+       /* set the pipe for the cursor */
+       temp |= (pipe << 28);
+       temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+       if (gma_power_begin(dev, false)) {
+               REG_WRITE(control, temp);
+               REG_WRITE(base, addr);
+               gma_power_end(dev);
+       }
+
+       /* Unpin the old GEM object (if any) and track the new one */
+       if (psb_intel_crtc->cursor_obj) {
+               gt = container_of(psb_intel_crtc->cursor_obj,
+                                                       struct gtt_range, gem);
+               psb_gtt_unpin(gt);
+               drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+       }
+       psb_intel_crtc->cursor_obj = obj;
+       return 0;
+}
+
+static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       uint32_t temp = 0;
+       uint32_t addr;
+
+
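+       /* The cursor position register uses sign-magnitude coordinates:
+        * record the sign bits and store the absolute values.
+        */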
+       if (x < 0) {
+               temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+               x = -x;
+       }
+       if (y < 0) {
+               temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+               y = -y;
+       }
+
+       temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+       temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+       addr = psb_intel_crtc->cursor_addr;
+
+       if (gma_power_begin(dev, false)) {
+               REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+               REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
+               gma_power_end(dev);
+       }
+       return 0;
+}
+
+void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+                        u16 *green, u16 *blue, uint32_t type, uint32_t size)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int i;
+
+       if (size != 256)
+               return;
+
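+       /* The hardware LUT is 8 bits per channel, so drop the low byte of
+        * each 16-bit DRM gamma entry.
+        */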
+       for (i = 0; i < 256; i++) {
+               psb_intel_crtc->lut_r[i] = red[i] >> 8;
+               psb_intel_crtc->lut_g[i] = green[i] >> 8;
+               psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+       }
+
+       psb_intel_crtc_load_lut(crtc);
+}
+
+static int psb_crtc_set_config(struct drm_mode_set *set)
+{
+       int ret;
+       struct drm_device *dev = set->crtc->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
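+       /* Keep the device from runtime suspending while the mode set is in
+        * flight, then allow runtime PM again once it has been applied.
+        */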
+       if (!dev_priv->rpm_enabled)
+               return drm_crtc_helper_set_config(set);
+
+       pm_runtime_forbid(&dev->pdev->dev);
+       ret = drm_crtc_helper_set_config(set);
+       pm_runtime_allow(&dev->pdev->dev);
+       return ret;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int psb_intel_crtc_clock_get(struct drm_device *dev,
+                               struct drm_crtc *crtc)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       u32 dpll;
+       u32 fp;
+       struct psb_intel_clock_t clock;
+       bool is_lvds;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
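+       /* Use the live PLL registers when the device is powered, otherwise
+        * fall back to the saved register state.
+        */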
+       if (gma_power_begin(dev, false)) {
+               dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+               if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+                       fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+               else
+                       fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+               is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+               gma_power_end(dev);
+       } else {
+               dpll = (pipe == 0) ?
+                       dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+
+               if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+                       fp = (pipe == 0) ?
+                               dev_priv->saveFPA0 :
+                               dev_priv->saveFPB0;
+               else
+                       fp = (pipe == 0) ?
+                               dev_priv->saveFPA1 :
+                               dev_priv->saveFPB1;
+
+               is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+       }
+
+       clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+       clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+       clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
+       if (is_lvds) {
+               clock.p1 =
+                   ffs((dpll &
+                        DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+                       DPLL_FPA01_P1_POST_DIV_SHIFT);
+               clock.p2 = 14;
+
+               if ((dpll & PLL_REF_INPUT_MASK) ==
+                   PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+                       /* XXX: might not be 66MHz */
+                       i8xx_clock(66000, &clock);
+               } else
+                       i8xx_clock(48000, &clock);
+       } else {
+               if (dpll & PLL_P1_DIVIDE_BY_TWO)
+                       clock.p1 = 2;
+               else {
+                       clock.p1 =
+                           ((dpll &
+                             DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+               }
+               if (dpll & PLL_P2_DIVIDE_BY_4)
+                       clock.p2 = 4;
+               else
+                       clock.p2 = 2;
+
+               i8xx_clock(48000, &clock);
+       }
+
+       /* XXX: It would be nice to validate the clocks, but we can't reuse
+        * i830PllIsValid() because it relies on the xf86_config connector
+        * configuration being accurate, which it isn't necessarily.
+        */
+
+       return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+                                            struct drm_crtc *crtc)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       int pipe = psb_intel_crtc->pipe;
+       struct drm_display_mode *mode;
+       int htot;
+       int hsync;
+       int vtot;
+       int vsync;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       if (gma_power_begin(dev, false)) {
+               htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+               hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+               vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+               vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+               gma_power_end(dev);
+       } else {
+               htot = (pipe == 0) ?
+                       dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+               hsync = (pipe == 0) ?
+                       dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+               vtot = (pipe == 0) ?
+                       dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+               vsync = (pipe == 0) ?
+                       dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+       }
+
+       mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+       if (!mode)
+               return NULL;
+
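+       /* The timing registers hold each value minus one, with the
+        * active/start count in the low word and the total/end in the
+        * high word.
+        */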
+       mode->clock = psb_intel_crtc_clock_get(dev, crtc);
+       mode->hdisplay = (htot & 0xffff) + 1;
+       mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+       mode->hsync_start = (hsync & 0xffff) + 1;
+       mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+       mode->vdisplay = (vtot & 0xffff) + 1;
+       mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+       mode->vsync_start = (vsync & 0xffff) + 1;
+       mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+       drm_mode_set_name(mode);
+       drm_mode_set_crtcinfo(mode, 0);
+
+       return mode;
+}
+
+void psb_intel_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct gtt_range *gt;
+
+       /* Unpin the old GEM object */
+       if (psb_intel_crtc->cursor_obj) {
+               gt = container_of(psb_intel_crtc->cursor_obj,
+                                               struct gtt_range, gem);
+               psb_gtt_unpin(gt);
+               drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+               psb_intel_crtc->cursor_obj = NULL;
+       }
+       kfree(psb_intel_crtc->crtc_state);
+       drm_crtc_cleanup(crtc);
+       kfree(psb_intel_crtc);
+}
+
+const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
+       .dpms = psb_intel_crtc_dpms,
+       .mode_fixup = psb_intel_crtc_mode_fixup,
+       .mode_set = psb_intel_crtc_mode_set,
+       .mode_set_base = psb_intel_pipe_set_base,
+       .prepare = psb_intel_crtc_prepare,
+       .commit = psb_intel_crtc_commit,
+};
+
+const struct drm_crtc_funcs psb_intel_crtc_funcs = {
+       .save = psb_intel_crtc_save,
+       .restore = psb_intel_crtc_restore,
+       .cursor_set = psb_intel_crtc_cursor_set,
+       .cursor_move = psb_intel_crtc_cursor_move,
+       .gamma_set = psb_intel_crtc_gamma_set,
+       .set_config = psb_crtc_set_config,
+       .destroy = psb_intel_crtc_destroy,
+};
+
+/*
+ * Set the default value of cursor control and base register
+ * to zero. This is a workaround for a hardware defect on Oaktrail.
+ */
+static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
+{
+       u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
+       u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
+
+       REG_WRITE(control[pipe], 0);
+       REG_WRITE(base[pipe], 0);
+}
+
+void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+                    struct psb_intel_mode_device *mode_dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_crtc *psb_intel_crtc;
+       int i;
+       uint16_t *r_base, *g_base, *b_base;
+
+       /* We allocate an extra array of drm_connector pointers
+        * for fbdev after the crtc */
+       psb_intel_crtc =
+           kzalloc(sizeof(struct psb_intel_crtc) +
+                   (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+                   GFP_KERNEL);
+       if (psb_intel_crtc == NULL)
+               return;
+
+       psb_intel_crtc->crtc_state =
+               kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
+       if (!psb_intel_crtc->crtc_state) {
+               dev_err(dev->dev, "Crtc state error: No memory\n");
+               kfree(psb_intel_crtc);
+               return;
+       }
+
+       /* Set the CRTC operations from the chip specific data */
+       drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
+
+       drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
+       psb_intel_crtc->pipe = pipe;
+       psb_intel_crtc->plane = pipe;
+
+       r_base = psb_intel_crtc->base.gamma_store;
+       g_base = r_base + 256;
+       b_base = g_base + 256;
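+       /* Start with an identity ramp in both the driver LUT and the DRM
+        * gamma store, with no per-entry adjustment.
+        */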
+       for (i = 0; i < 256; i++) {
+               psb_intel_crtc->lut_r[i] = i;
+               psb_intel_crtc->lut_g[i] = i;
+               psb_intel_crtc->lut_b[i] = i;
+               r_base[i] = i << 8;
+               g_base[i] = i << 8;
+               b_base[i] = i << 8;
+
+               psb_intel_crtc->lut_adj[i] = 0;
+       }
+
+       psb_intel_crtc->mode_dev = mode_dev;
+       psb_intel_crtc->cursor_addr = 0;
+
+       drm_crtc_helper_add(&psb_intel_crtc->base,
+                                               dev_priv->ops->crtc_helper);
+
+       /* Set up the array of drm_connector pointers */
+       psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
+       BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+              dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
+       dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
+                                                       &psb_intel_crtc->base;
+       dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
+                                                       &psb_intel_crtc->base;
+       psb_intel_crtc->mode_set.connectors =
+           (struct drm_connector **) (psb_intel_crtc + 1);
+       psb_intel_crtc->mode_set.num_connectors = 0;
+       psb_intel_cursor_init(dev, pipe);
+}
+
+int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
+       struct drm_mode_object *drmmode_obj;
+       struct psb_intel_crtc *crtc;
+
+       if (!dev_priv) {
+               dev_err(dev->dev, "called with no initialization\n");
+               return -EINVAL;
+       }
+
+       drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
+                       DRM_MODE_OBJECT_CRTC);
+
+       if (!drmmode_obj) {
+               dev_err(dev->dev, "no such CRTC id\n");
+               return -EINVAL;
+       }
+
+       crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
+       pipe_from_crtc_id->pipe = crtc->pipe;
+
+       return 0;
+}
+
+struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+       struct drm_crtc *crtc = NULL;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+               if (psb_intel_crtc->pipe == pipe)
+                       break;
+       }
+       return crtc;
+}
+
+int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
+{
+       int index_mask = 0;
+       struct drm_connector *connector;
+       int entry = 0;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list,
+                           head) {
+               struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+               if (type_mask & (1 << psb_intel_encoder->type))
+                       index_mask |= (1 << entry);
+               entry++;
+       }
+       return index_mask;
+}
+
+
+void psb_intel_modeset_cleanup(struct drm_device *dev)
+{
+       drm_mode_config_cleanup(dev);
+}
+
+
+/* The current Intel driver doesn't take advantage of encoders;
+   always give back the encoder for the connector.
+*/
+struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+
+       return &psb_intel_encoder->base;
+}
+
+void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
+                                       struct psb_intel_encoder *encoder)
+{
+       connector->encoder = encoder;
+       drm_mode_connector_attach_encoder(&connector->base,
+                                         &encoder->base);
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_intel_display.h
new file mode 100644 (file)
index 0000000..535b49a
--- /dev/null
@@ -0,0 +1,28 @@
+/* Copyright (c) 2008, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
+void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+                        u16 *green, u16 *blue, uint32_t type, uint32_t size);
+void psb_intel_crtc_destroy(struct drm_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
new file mode 100644 (file)
index 0000000..f40535e
--- /dev/null
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/gpio.h>
+
+/*
+ * Display related stuff
+ */
+
+/* store information about an Ixxx DVO */
+/* The i830->i865 use multiple DVOs with multiple i2cs */
+/* the i915, i945 have a single sDVO i2c bus - which is different */
+#define MAX_OUTPUTS 6
+/* maximum connectors per crtcs in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* Intel Pipe Clone Bit */
+#define INTEL_HDMIB_CLONE_BIT 1
+#define INTEL_HDMIC_CLONE_BIT 2
+#define INTEL_HDMID_CLONE_BIT 3
+#define INTEL_HDMIE_CLONE_BIT 4
+#define INTEL_HDMIF_CLONE_BIT 5
+#define INTEL_SDVO_NON_TV_CLONE_BIT 6
+#define INTEL_SDVO_TV_CLONE_BIT 7
+#define INTEL_SDVO_LVDS_CLONE_BIT 8
+#define INTEL_ANALOG_CLONE_BIT 9
+#define INTEL_TV_CLONE_BIT 10
+#define INTEL_DP_B_CLONE_BIT 11
+#define INTEL_DP_C_CLONE_BIT 12
+#define INTEL_DP_D_CLONE_BIT 13
+#define INTEL_LVDS_CLONE_BIT 14
+#define INTEL_DVO_TMDS_CLONE_BIT 15
+#define INTEL_DVO_LVDS_CLONE_BIT 16
+#define INTEL_EDP_CLONE_BIT 17
+
+/* these are outputs from the chip - integrated only
+ * external chips are via DVO or SDVO output */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_MIPI 7
+#define INTEL_OUTPUT_MIPI2 8
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+
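+/*
+ * The pixel multiplier lives in the low nibble of mode->private_flags
+ * (the shift is zero), so the setter can OR the raw value in directly.
+ */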
+static inline void
+psb_intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+                               int multiplier)
+{
+       mode->clock *= multiplier;
+       mode->private_flags |= multiplier;
+}
+
+static inline int
+psb_intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+       return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK)
+              >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
+
+
+/*
+ * Hold information usually kept in the device driver's private data here,
+ * since it needs to be shared across the private data of multiple drivers.
+ */
+struct psb_intel_mode_device {
+
+       /*
+        * Abstracted memory manager operations
+        */
+        size_t(*bo_offset) (struct drm_device *dev, void *bo);
+
+       /*
+        * Cursor (Can go ?)
+        */
+       int cursor_needs_physical;
+
+       /*
+        * LVDS info
+        */
+       int backlight_duty_cycle;       /* restore backlight to this value */
+       bool panel_wants_dither;
+       struct drm_display_mode *panel_fixed_mode;
+       struct drm_display_mode *panel_fixed_mode2;
+       struct drm_display_mode *vbt_mode;      /* if any */
+
+       uint32_t saveBLC_PWM_CTL;
+};
+
+struct psb_intel_i2c_chan {
+       /* for getting at dev. private (mmio etc.) */
+       struct drm_device *drm_dev;
+       u32 reg;                /* GPIO reg */
+       struct i2c_adapter adapter;
+       struct i2c_algo_bit_data algo;
+       u8 slave_addr;
+};
+
+struct psb_intel_encoder {
+       struct drm_encoder base;
+       int type;
+       bool needs_tv_clock;
+       void (*hot_plug)(struct psb_intel_encoder *);
+       int crtc_mask;
+       int clone_mask;
+       void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
+
+       /* FIXME: Either make SDVO and LVDS store their i2c here or give CDV its
+          own set of output privates */
+       struct psb_intel_i2c_chan *i2c_bus;
+       struct psb_intel_i2c_chan *ddc_bus;
+};
+
+struct psb_intel_connector {
+       struct drm_connector base;
+       struct psb_intel_encoder *encoder;
+};
+
+struct psb_intel_crtc_state {
+       uint32_t saveDSPCNTR;
+       uint32_t savePIPECONF;
+       uint32_t savePIPESRC;
+       uint32_t saveDPLL;
+       uint32_t saveFP0;
+       uint32_t saveFP1;
+       uint32_t saveHTOTAL;
+       uint32_t saveHBLANK;
+       uint32_t saveHSYNC;
+       uint32_t saveVTOTAL;
+       uint32_t saveVBLANK;
+       uint32_t saveVSYNC;
+       uint32_t saveDSPSTRIDE;
+       uint32_t saveDSPSIZE;
+       uint32_t saveDSPPOS;
+       uint32_t saveDSPBASE;
+       uint32_t savePalette[256];
+};
+
+struct psb_intel_crtc {
+       struct drm_crtc base;
+       int pipe;
+       int plane;
+       uint32_t cursor_addr;
+       u8 lut_r[256], lut_g[256], lut_b[256];
+       u8 lut_adj[256];
+       struct psb_intel_framebuffer *fbdev_fb;
+       /* a mode_set for fbdev users on this crtc */
+       struct drm_mode_set mode_set;
+
+       /* GEM object that holds our cursor */
+       struct drm_gem_object *cursor_obj;
+
+       struct drm_display_mode saved_mode;
+       struct drm_display_mode saved_adjusted_mode;
+
+       struct psb_intel_mode_device *mode_dev;
+
+       /*crtc mode setting flags*/
+       u32 mode_flags;
+
+       /* Saved Crtc HW states */
+       struct psb_intel_crtc_state *crtc_state;
+};
+
+#define to_psb_intel_crtc(x)   \
+               container_of(x, struct psb_intel_crtc, base)
+#define to_psb_intel_connector(x) \
+               container_of(x, struct psb_intel_connector, base)
+#define to_psb_intel_encoder(x)        \
+               container_of(x, struct psb_intel_encoder, base)
+#define to_psb_intel_framebuffer(x)    \
+               container_of(x, struct psb_intel_framebuffer, base)
+
+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+                                       const u32 reg, const char *name);
+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+                           struct i2c_adapter *adapter);
+extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
+
+extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+                           struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_crt_init(struct drm_device *dev);
+extern bool psb_intel_sdvo_init(struct drm_device *dev, int output_device);
+extern void psb_intel_dvo_init(struct drm_device *dev);
+extern void psb_intel_tv_init(struct drm_device *dev);
+extern void psb_intel_lvds_init(struct drm_device *dev,
+                           struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
+extern void oaktrail_lvds_init(struct drm_device *dev,
+                          struct psb_intel_mode_device *mode_dev);
+extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
+extern void oaktrail_dsi_init(struct drm_device *dev,
+                          struct psb_intel_mode_device *mode_dev);
+extern void mid_dsi_init(struct drm_device *dev,
+                   struct psb_intel_mode_device *mode_dev, int dsi_num);
+
+extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
+extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
+extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
+
+static inline struct psb_intel_encoder *psb_intel_attached_encoder(
+                                               struct drm_connector *connector)
+{
+       return to_psb_intel_connector(connector)->encoder;
+}
+
+extern void psb_intel_connector_attach_encoder(
+                                       struct psb_intel_connector *connector,
+                                       struct psb_intel_encoder *encoder);
+
+extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
+                                             *connector);
+
+extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+                                                   struct drm_crtc *crtc);
+extern void psb_intel_wait_for_vblank(struct drm_device *dev);
+extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
+extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
+                                                int pipe);
+extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
+                                            int sdvoB);
+extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
+extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
+                                  int enable);
+extern int intelfb_probe(struct drm_device *dev);
+extern int intelfb_remove(struct drm_device *dev,
+                         struct drm_framebuffer *fb);
+extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
+                                                       *dev, struct
+                                                       drm_mode_fb_cmd
+                                                       *mode_cmd,
+                                                       void *mm_private);
+extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+                                     struct drm_display_mode *mode,
+                                     struct drm_display_mode *adjusted_mode);
+extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+                                    struct drm_display_mode *mode);
+extern int psb_intel_lvds_set_property(struct drm_connector *connector,
+                                       struct drm_property *property,
+                                       uint64_t value);
+extern void psb_intel_lvds_destroy(struct drm_connector *connector);
+extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
+
+/* intel_gmbus.c */
+extern void gma_intel_i2c_reset(struct drm_device *dev);
+extern int gma_intel_setup_gmbus(struct drm_device *dev);
+extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern void gma_intel_teardown_gmbus(struct drm_device *dev);
+
+#endif                         /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
new file mode 100644 (file)
index 0000000..a25e4ca
--- /dev/null
@@ -0,0 +1,868 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ *     Dave Airlie <airlied@linux.ie>
+ *     Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+/*
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE   0x01
+#define BLC_PWM_TYPT   0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ       (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ       (0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR   (10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT    (16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+struct psb_intel_lvds_priv {
+       /*
+        * Saved LVDS output states
+        */
+       uint32_t savePP_ON;
+       uint32_t savePP_OFF;
+       uint32_t saveLVDS;
+       uint32_t savePP_CONTROL;
+       uint32_t savePP_CYCLE;
+       uint32_t savePFIT_CONTROL;
+       uint32_t savePFIT_PGM_RATIOS;
+       uint32_t saveBLC_PWM_CTL;
+
+       struct psb_intel_i2c_chan *i2c_bus;
+       struct psb_intel_i2c_chan *ddc_bus;
+};
+
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 ret;
+
+       if (gma_power_begin(dev, false)) {
+               ret = REG_READ(BLC_PWM_CTL);
+               gma_power_end(dev);
+       } else /* Powered off, use the saved value */
+               ret = dev_priv->saveBLC_PWM_CTL;
+
+       /* The top 15 bits hold the modulation frequency */
+       ret = (ret &  BACKLIGHT_MODULATION_FREQ_MASK) >>
+                                       BACKLIGHT_MODULATION_FREQ_SHIFT;
+
+       ret *= 2;       /* Return a 16bit range as needed for setting */
+       if (ret == 0)
+               dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
+                       REG_READ(BLC_PWM_CTL), dev_priv->saveBLC_PWM_CTL);
+       return ret;
+}
+
+/*
+ * Set LVDS backlight level by I2C command
+ *
+ * FIXME: at some point we need to both track this for PM and also
+ * disable runtime pm on MRST if the brightness is nil (ie blanked)
+ */
+static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
+                                       unsigned int level)
+{
+       struct drm_psb_private *dev_priv =
+               (struct drm_psb_private *)dev->dev_private;
+
+       struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+       u8 out_buf[2];
+       unsigned int blc_i2c_brightness;
+
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = lvds_i2c_bus->slave_addr,
+                       .flags = 0,
+                       .len = 2,
+                       .buf = out_buf,
+               }
+       };
+
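+       /* Scale the 0-100 level into the 8-bit range the backlight
+        * controller expects, inverting it if the panel polarity is
+        * inverse.
+        */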
+       blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+                            BRIGHTNESS_MASK /
+                            BRIGHTNESS_MAX_LEVEL);
+
+       if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+               blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+       out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+       out_buf[1] = (u8)blc_i2c_brightness;
+
+       if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
+               dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n",
+                       dev_priv->lvds_bl->brightnesscmd,
+                       blc_i2c_brightness);
+               return 0;
+       }
+
+       dev_err(dev->dev, "I2C transfer error\n");
+       return -1;
+}
+
+
+static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+       struct drm_psb_private *dev_priv =
+                       (struct drm_psb_private *)dev->dev_private;
+
+       u32 max_pwm_blc;
+       u32 blc_pwm_duty_cycle;
+
+       max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
+
+       /* BLC_PWM_CTL should be set up during backlight device init */
+       BUG_ON(max_pwm_blc == 0);
+
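+       /* Scale the level against the maximum PWM count and clear the
+        * polarity bit before writing the control register.
+        */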
+       blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+       if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+               blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+       blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+       REG_WRITE(BLC_PWM_CTL,
+                 (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+                 (blc_pwm_duty_cycle));
+
+       dev_info(dev->dev, "Backlight lvds set brightness %08x\n",
+                (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+                (blc_pwm_duty_cycle));
+
+       return 0;
+}
+
+/*
+ * Set LVDS backlight level either by I2C or PWM
+ */
+void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+
+       dev_dbg(dev->dev, "backlight level is %d\n", level);
+
+       if (!dev_priv->lvds_bl) {
+               dev_err(dev->dev, "NO LVDS backlight info\n");
+               return;
+       }
+
+       if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+               psb_lvds_i2c_set_brightness(dev, level);
+       else
+               psb_lvds_pwm_set_brightness(dev, level);
+}
+
+/*
+ * Sets the backlight level.
+ *
+ * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight().
+ */
+static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 blc_pwm_ctl;
+
+       if (gma_power_begin(dev, false)) {
+               blc_pwm_ctl = REG_READ(BLC_PWM_CTL);
+               blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+               REG_WRITE(BLC_PWM_CTL,
+                               (blc_pwm_ctl |
+                               (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+               dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+                                       (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+               gma_power_end(dev);
+       } else {
+               blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+                               ~BACKLIGHT_DUTY_CYCLE_MASK;
+               dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+                                       (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+       }
+}
+
+/*
+ * Sets the power state for the panel.
+ */
+static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       u32 pp_status;
+
+       if (!gma_power_begin(dev, true)) {
+               dev_err(dev->dev, "set power, chip off!\n");
+               return;
+       }
+
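+       /* Toggle the panel power sequencer and poll PP_STATUS until the
+        * panel reports the requested state.
+        */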
+       if (on) {
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+                         POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while ((pp_status & PP_ON) == 0);
+
+               psb_intel_lvds_set_backlight(dev,
+                                            mode_dev->backlight_duty_cycle);
+       } else {
+               psb_intel_lvds_set_backlight(dev, 0);
+
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+                         ~POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while (pp_status & PP_ON);
+       }
+
+       gma_power_end(dev);
+}
+
+static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+
+       if (mode == DRM_MODE_DPMS_ON)
+               psb_intel_lvds_set_power(dev, true);
+       else
+               psb_intel_lvds_set_power(dev, false);
+
+       /* XXX: We never power down the LVDS pairs. */
+}
+
+static void psb_intel_lvds_save(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_psb_private *dev_priv =
+               (struct drm_psb_private *)dev->dev_private;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_lvds_priv *lvds_priv =
+               (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+
+       lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
+       lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
+       lvds_priv->saveLVDS = REG_READ(LVDS);
+       lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
+       lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
+       /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
+       lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+       lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
+       lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
+
+       /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
+       dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+                                               BACKLIGHT_DUTY_CYCLE_MASK);
+
+       /*
+        * If the light is off at server startup,
+        * just make it full brightness
+        */
+       if (dev_priv->backlight_duty_cycle == 0)
+               dev_priv->backlight_duty_cycle =
+               psb_intel_lvds_get_max_backlight(dev);
+
+       dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+                       lvds_priv->savePP_ON,
+                       lvds_priv->savePP_OFF,
+                       lvds_priv->saveLVDS,
+                       lvds_priv->savePP_CONTROL,
+                       lvds_priv->savePP_CYCLE,
+                       lvds_priv->saveBLC_PWM_CTL);
+}
+
+static void psb_intel_lvds_restore(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       u32 pp_status;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_lvds_priv *lvds_priv =
+               (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+
+       dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+                       lvds_priv->savePP_ON,
+                       lvds_priv->savePP_OFF,
+                       lvds_priv->saveLVDS,
+                       lvds_priv->savePP_CONTROL,
+                       lvds_priv->savePP_CYCLE,
+                       lvds_priv->saveBLC_PWM_CTL);
+
+       REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
+       REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
+       REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
+       REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
+       REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
+       /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
+       REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
+       REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
+       REG_WRITE(LVDS, lvds_priv->saveLVDS);
+
+       if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+                       POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while ((pp_status & PP_ON) == 0);
+       } else {
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+                       ~POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while (pp_status & PP_ON);
+       }
+}
+
+int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+{
+       struct drm_psb_private *dev_priv = connector->dev->dev_private;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct drm_display_mode *fixed_mode =
+                                       dev_priv->mode_dev.panel_fixed_mode;
+
+       if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+               fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
+
+       /* just in case */
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       /* just in case */
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               return MODE_NO_INTERLACE;
+
+       if (fixed_mode) {
+               if (mode->hdisplay > fixed_mode->hdisplay)
+                       return MODE_PANEL;
+               if (mode->vdisplay > fixed_mode->vdisplay)
+                       return MODE_PANEL;
+       }
+       return MODE_OK;
+}
+
+bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       struct psb_intel_crtc *psb_intel_crtc =
+                               to_psb_intel_crtc(encoder->crtc);
+       struct drm_encoder *tmp_encoder;
+       struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                               to_psb_intel_encoder(encoder);
+
+       if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+               panel_fixed_mode = mode_dev->panel_fixed_mode2;
+
+       /* PSB requires that LVDS be on pipe B; MRST has only one pipe anyway */
+       if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
+               printk(KERN_ERR "Can't support LVDS on pipe A\n");
+               return false;
+       }
+       if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
+               printk(KERN_ERR "Must use PIPE A\n");
+               return false;
+       }
+       /* Should never happen!! */
+       list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+                           head) {
+               if (tmp_encoder != encoder
+                   && tmp_encoder->crtc == encoder->crtc) {
+                       printk(KERN_ERR "Can't enable LVDS and another "
+                              "encoder on the same pipe\n");
+                       return false;
+               }
+       }
+
+       /*
+        * If we have timings from the BIOS for the panel, put them in
+        * to the adjusted mode.  The CRTC will be set up for this mode,
+        * with the panel scaling set up to source from the H/VDisplay
+        * of the original mode.
+        */
+       if (panel_fixed_mode != NULL) {
+               adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+               adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+               adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+               adjusted_mode->htotal = panel_fixed_mode->htotal;
+               adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+               adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+               adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+               adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+               adjusted_mode->clock = panel_fixed_mode->clock;
+               drm_mode_set_crtcinfo(adjusted_mode,
+                                     CRTC_INTERLACE_HALVE_V);
+       }
+
+       /*
+        * XXX: It would be nice to support lower refresh rates on the
+        * panels to reduce power consumption, and perhaps match the
+        * user's requested refresh rate.
+        */
+
+       return true;
+}
+
+static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+       if (!gma_power_begin(dev, true))
+               return;
+
+       mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+       mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+                                         BACKLIGHT_DUTY_CYCLE_MASK);
+
+       psb_intel_lvds_set_power(dev, false);
+
+       gma_power_end(dev);
+}
+
+static void psb_intel_lvds_commit(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+       if (mode_dev->backlight_duty_cycle == 0)
+               mode_dev->backlight_duty_cycle =
+                   psb_intel_lvds_get_max_backlight(dev);
+
+       psb_intel_lvds_set_power(dev, true);
+}
+
+static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 pfit_control;
+
+       /*
+        * The LVDS pin pair will already have been turned on in the
+        * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+        * settings.
+        */
+
+       /*
+        * Enable automatic panel scaling so that non-native modes fill the
+        * screen.  Should be enabled before the pipe is enabled, according to
+        * register description and PRM.
+        */
+       if (mode->hdisplay != adjusted_mode->hdisplay ||
+           mode->vdisplay != adjusted_mode->vdisplay)
+               pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+                               HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+                               HORIZ_INTERP_BILINEAR);
+       else
+               pfit_control = 0;
+
+       if (dev_priv->lvds_dither)
+               pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+       REG_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/*
+ * Detect the LVDS connection.
+ *
+ * This always returns CONNECTOR_STATUS_CONNECTED.  This connector should
+ * only have been set up if the LVDS was actually connected anyway.
+ */
+static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
+                                                  *connector, bool force)
+{
+       return connector_status_connected;
+}
+
+/*
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int psb_intel_lvds_get_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+       int ret = 0;
+
+       if (!IS_MRST(dev))
+               ret = psb_intel_ddc_get_modes(connector, &lvds_priv->i2c_bus->adapter);
+
+       if (ret)
+               return ret;
+
+       /*
+        * Didn't get an EDID, so set wide sync ranges to get all modes
+        * handed to mode_valid for checking.
+        */
+       connector->display_info.min_vfreq = 0;
+       connector->display_info.max_vfreq = 200;
+       connector->display_info.min_hfreq = 0;
+       connector->display_info.max_hfreq = 200;
+
+       if (mode_dev->panel_fixed_mode != NULL) {
+               struct drm_display_mode *mode =
+                   drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+               drm_mode_probed_add(connector, mode);
+               return 1;
+       }
+
+       return 0;
+}
+
+/**
+ * psb_intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+void psb_intel_lvds_destroy(struct drm_connector *connector)
+{
+       struct psb_intel_encoder *psb_intel_encoder =
+                                       psb_intel_attached_encoder(connector);
+       struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+
+       if (lvds_priv->ddc_bus)
+               psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+int psb_intel_lvds_set_property(struct drm_connector *connector,
+                                      struct drm_property *property,
+                                      uint64_t value)
+{
+       struct drm_encoder *encoder = connector->encoder;
+
+       if (!encoder)
+               return -1;
+
+       if (!strcmp(property->name, "scaling mode")) {
+               struct psb_intel_crtc *crtc =
+                                       to_psb_intel_crtc(encoder->crtc);
+               uint64_t curval;
+
+               if (!crtc)
+                       goto set_prop_error;
+
+               switch (value) {
+               case DRM_MODE_SCALE_FULLSCREEN:
+               case DRM_MODE_SCALE_NO_SCALE:
+               case DRM_MODE_SCALE_ASPECT:
+                       break;
+               default:
+                       goto set_prop_error;
+               }
+
+               if (drm_connector_property_get_value(connector,
+                                                    property,
+                                                    &curval))
+                       goto set_prop_error;
+
+               if (curval == value)
+                       goto set_prop_done;
+
+               if (drm_connector_property_set_value(connector,
+                                                       property,
+                                                       value))
+                       goto set_prop_error;
+
+               if (crtc->saved_mode.hdisplay != 0 &&
+                   crtc->saved_mode.vdisplay != 0) {
+                       if (!drm_crtc_helper_set_mode(encoder->crtc,
+                                                     &crtc->saved_mode,
+                                                     encoder->crtc->x,
+                                                     encoder->crtc->y,
+                                                     encoder->crtc->fb))
+                               goto set_prop_error;
+               }
+       } else if (!strcmp(property->name, "backlight")) {
+               if (drm_connector_property_set_value(connector,
+                                                       property,
+                                                       value))
+                       goto set_prop_error;
+               else {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+                       struct drm_psb_private *devp =
+                                               encoder->dev->dev_private;
+                       struct backlight_device *bd = devp->backlight_device;
+                       if (bd) {
+                               bd->props.brightness = value;
+                               backlight_update_status(bd);
+                       }
+#endif
+               }
+       } else if (!strcmp(property->name, "DPMS")) {
+               struct drm_encoder_helper_funcs *hfuncs
+                                               = encoder->helper_private;
+               hfuncs->dpms(encoder, value);
+       }
+
+set_prop_done:
+       return 0;
+set_prop_error:
+       return -1;
+}
+
+static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
+       .dpms = psb_intel_lvds_encoder_dpms,
+       .mode_fixup = psb_intel_lvds_mode_fixup,
+       .prepare = psb_intel_lvds_prepare,
+       .mode_set = psb_intel_lvds_mode_set,
+       .commit = psb_intel_lvds_commit,
+};
+
+const struct drm_connector_helper_funcs
+                               psb_intel_lvds_connector_helper_funcs = {
+       .get_modes = psb_intel_lvds_get_modes,
+       .mode_valid = psb_intel_lvds_mode_valid,
+       .best_encoder = psb_intel_best_encoder,
+};
+
+const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .save = psb_intel_lvds_save,
+       .restore = psb_intel_lvds_restore,
+       .detect = psb_intel_lvds_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = psb_intel_lvds_set_property,
+       .destroy = psb_intel_lvds_destroy,
+};
+
+
+static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
+       .destroy = psb_intel_lvds_enc_destroy,
+};
+
+
+
+/**
+ * psb_intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ * @mode_dev: per-device mode state (fixed panel modes, backlight save area)
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void psb_intel_lvds_init(struct drm_device *dev,
+                        struct psb_intel_mode_device *mode_dev)
+{
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct psb_intel_connector *psb_intel_connector;
+       struct psb_intel_lvds_priv *lvds_priv;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       struct drm_display_mode *scan;  /* *modes, *bios_mode; */
+       struct drm_crtc *crtc;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       u32 lvds;
+       int pipe;
+
+       psb_intel_encoder =
+                       kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+
+       if (!psb_intel_encoder) {
+               dev_err(dev->dev, "psb_intel_encoder allocation error\n");
+               return;
+       }
+
+       psb_intel_connector =
+               kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+
+       if (!psb_intel_connector) {
+               kfree(psb_intel_encoder);
+               dev_err(dev->dev, "psb_intel_connector allocation error\n");
+               return;
+       }
+
+       lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
+       if (!lvds_priv) {
+               dev_err(dev->dev, "LVDS private allocation error\n");
+               goto failed_connector;
+       }
+
+       psb_intel_encoder->dev_priv = lvds_priv;
+
+       connector = &psb_intel_connector->base;
+       encoder = &psb_intel_encoder->base;
+       drm_connector_init(dev, connector,
+                          &psb_intel_lvds_connector_funcs,
+                          DRM_MODE_CONNECTOR_LVDS);
+
+       drm_encoder_init(dev, encoder,
+                        &psb_intel_lvds_enc_funcs,
+                        DRM_MODE_ENCODER_LVDS);
+
+       psb_intel_connector_attach_encoder(psb_intel_connector,
+                                          psb_intel_encoder);
+       psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+       drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
+       drm_connector_helper_add(connector,
+                                &psb_intel_lvds_connector_helper_funcs);
+       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+
+       /*Attach connector properties*/
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.scaling_mode_property,
+                                     DRM_MODE_SCALE_FULLSCREEN);
+       drm_connector_attach_property(connector,
+                                     dev_priv->backlight_property,
+                                     BRIGHTNESS_MAX_LEVEL);
+
+       /*
+        * Set up I2C bus
+        * FIXME: destroy i2c_bus on exit
+        */
+       lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
+       if (!lvds_priv->i2c_bus) {
+               dev_printk(KERN_ERR,
+                       &dev->pdev->dev, "I2C bus registration failed.\n");
+               goto failed_blc_i2c;
+       }
+       lvds_priv->i2c_bus->slave_addr = 0x2C;
+       dev_priv->lvds_i2c_bus =  lvds_priv->i2c_bus;
+
+       /*
+        * LVDS discovery:
+        * 1) check for EDID on DDC
+        * 2) check for VBT data
+        * 3) check to see if LVDS is already on
+        *    if none of the above, no panel
+        * 4) make sure lid is open
+        *    if closed, act like it's not there for now
+        */
+
+       /* Set up the DDC bus. */
+       lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
+       if (!lvds_priv->ddc_bus) {
+               dev_printk(KERN_ERR, &dev->pdev->dev,
+                          "DDC bus registration failed.\n");
+               goto failed_ddc;
+       }
+
+       /*
+        * Attempt to get the fixed panel mode from DDC.  Assume that the
+        * preferred mode is the right one.
+        */
+       psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter);
+       list_for_each_entry(scan, &connector->probed_modes, head) {
+               if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+                       mode_dev->panel_fixed_mode =
+                           drm_mode_duplicate(dev, scan);
+                       goto out;       /* FIXME: check for quirks */
+               }
+       }
+
+       /* Failed to get EDID, what about VBT? Do we need this? */
+       if (mode_dev->vbt_mode)
+               mode_dev->panel_fixed_mode =
+                   drm_mode_duplicate(dev, mode_dev->vbt_mode);
+
+       if (!mode_dev->panel_fixed_mode)
+               if (dev_priv->lfp_lvds_vbt_mode)
+                       mode_dev->panel_fixed_mode =
+                               drm_mode_duplicate(dev,
+                                       dev_priv->lfp_lvds_vbt_mode);
+
+       /*
+        * If we didn't get EDID, try checking if the panel is already turned
+        * on.  If so, assume that whatever is currently programmed is the
+        * correct mode.
+        */
+       lvds = REG_READ(LVDS);
+       pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+       crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+       if (crtc && (lvds & LVDS_PORT_EN)) {
+               mode_dev->panel_fixed_mode =
+                   psb_intel_crtc_mode_get(dev, crtc);
+               if (mode_dev->panel_fixed_mode) {
+                       mode_dev->panel_fixed_mode->type |=
+                           DRM_MODE_TYPE_PREFERRED;
+                       goto out;       /* FIXME: check for quirks */
+               }
+       }
+
+       /* If we still don't have a mode after all that, give up. */
+       if (!mode_dev->panel_fixed_mode) {
+               dev_err(dev->dev, "Found no modes on the LVDS, ignoring the LVDS\n");
+               goto failed_find;
+       }
+
+       /*
+        * Blacklist machines with BIOSes that list an LVDS panel without
+        * actually having one.
+        */
+out:
+       drm_sysfs_connector_add(connector);
+       return;
+
+failed_find:
+       if (lvds_priv->ddc_bus)
+               psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+failed_ddc:
+       if (lvds_priv->i2c_bus)
+               psb_intel_i2c_destroy(lvds_priv->i2c_bus);
+failed_blc_i2c:
+       drm_encoder_cleanup(encoder);
+       drm_connector_cleanup(connector);
+failed_connector:
+       if (psb_intel_connector)
+               kfree(psb_intel_connector);
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
new file mode 100644 (file)
index 0000000..4fca0d6
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <drm/drmP.h>
+#include "psb_intel_drv.h"
+
+/**
+ * psb_intel_ddc_probe - check whether a DDC device responds
+ * @adapter: I2C adapter for the DDC bus to probe
+ *
+ * Perform a minimal one-byte read at the EDID address (0x50) and report
+ * whether a device answered.
+ */
+bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
+{
+       u8 out_buf[] = { 0x0, 0x0 };
+       u8 buf[2];
+       int ret;
+       struct i2c_msg msgs[] = {
+               {
+                .addr = 0x50,
+                .flags = 0,
+                .len = 1,
+                .buf = out_buf,
+                },
+               {
+                .addr = 0x50,
+                .flags = I2C_M_RD,
+                .len = 1,
+                .buf = buf,
+                }
+       };
+
+       ret = i2c_transfer(adapter, msgs, 2);
+       if (ret == 2)
+               return true;
+
+       return false;
+}
+
+/**
+ * psb_intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ * @adapter: I2C adapter for the DDC bus
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+                           struct i2c_adapter *adapter)
+{
+       struct edid *edid;
+       int ret = 0;
+
+       edid = drm_get_edid(connector, adapter);
+       if (edid) {
+               drm_mode_connector_update_edid_property(connector, edid);
+               ret = drm_add_edid_modes(connector, edid);
+               kfree(edid);
+       }
+       return ret;
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
new file mode 100644 (file)
index 0000000..fcc0af0
--- /dev/null
@@ -0,0 +1,1309 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __PSB_INTEL_REG_H__
+#define __PSB_INTEL_REG_H__
+
+/*
+ * GPIO regs
+ */
+#define GPIOA                  0x5010
+#define GPIOB                  0x5014
+#define GPIOC                  0x5018
+#define GPIOD                  0x501c
+#define GPIOE                  0x5020
+#define GPIOF                  0x5024
+#define GPIOG                  0x5028
+#define GPIOH                  0x502c
+# define GPIO_CLOCK_DIR_MASK           (1 << 0)
+# define GPIO_CLOCK_DIR_IN             (0 << 1)
+# define GPIO_CLOCK_DIR_OUT            (1 << 1)
+# define GPIO_CLOCK_VAL_MASK           (1 << 2)
+# define GPIO_CLOCK_VAL_OUT            (1 << 3)
+# define GPIO_CLOCK_VAL_IN             (1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE     (1 << 5)
+# define GPIO_DATA_DIR_MASK            (1 << 8)
+# define GPIO_DATA_DIR_IN              (0 << 9)
+# define GPIO_DATA_DIR_OUT             (1 << 9)
+# define GPIO_DATA_VAL_MASK            (1 << 10)
+# define GPIO_DATA_VAL_OUT             (1 << 11)
+# define GPIO_DATA_VAL_IN              (1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE      (1 << 13)
+
+#define GMBUS0                 0x5100 /* clock/port select */
+#define   GMBUS_RATE_100KHZ    (0<<8)
+#define   GMBUS_RATE_50KHZ     (1<<8)
+#define   GMBUS_RATE_400KHZ    (2<<8) /* reserved on Pineview */
+#define   GMBUS_RATE_1MHZ      (3<<8) /* reserved on Pineview */
+#define   GMBUS_HOLD_EXT       (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define   GMBUS_PORT_DISABLED  0
+#define   GMBUS_PORT_SSC       1
+#define   GMBUS_PORT_VGADDC    2
+#define   GMBUS_PORT_PANEL     3
+#define   GMBUS_PORT_DPC       4 /* HDMIC */
+#define   GMBUS_PORT_DPB       5 /* SDVO, HDMIB */
+                                 /* 6 reserved */
+#define   GMBUS_PORT_DPD       7 /* HDMID */
+#define   GMBUS_NUM_PORTS       8
+#define GMBUS1                 0x5104 /* command/status */
+#define   GMBUS_SW_CLR_INT     (1<<31)
+#define   GMBUS_SW_RDY         (1<<30)
+#define   GMBUS_ENT            (1<<29) /* enable timeout */
+#define   GMBUS_CYCLE_NONE     (0<<25)
+#define   GMBUS_CYCLE_WAIT     (1<<25)
+#define   GMBUS_CYCLE_INDEX    (2<<25)
+#define   GMBUS_CYCLE_STOP     (4<<25)
+#define   GMBUS_BYTE_COUNT_SHIFT 16
+#define   GMBUS_SLAVE_INDEX_SHIFT 8
+#define   GMBUS_SLAVE_ADDR_SHIFT 1
+#define   GMBUS_SLAVE_READ     (1<<0)
+#define   GMBUS_SLAVE_WRITE    (0<<0)
+#define GMBUS2                 0x5108 /* status */
+#define   GMBUS_INUSE          (1<<15)
+#define   GMBUS_HW_WAIT_PHASE  (1<<14)
+#define   GMBUS_STALL_TIMEOUT  (1<<13)
+#define   GMBUS_INT            (1<<12)
+#define   GMBUS_HW_RDY         (1<<11)
+#define   GMBUS_SATOER         (1<<10)
+#define   GMBUS_ACTIVE         (1<<9)
+#define GMBUS3                 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4                 0x5110 /* interrupt mask (Pineview+) */
+#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define   GMBUS_NAK_EN         (1<<3)
+#define   GMBUS_IDLE_EN                (1<<2)
+#define   GMBUS_HW_WAIT_EN     (1<<1)
+#define   GMBUS_HW_RDY_EN      (1<<0)
+#define GMBUS5                 0x5120 /* byte index */
+#define   GMBUS_2BYTE_INDEX_EN (1<<31)
+
+#define BLC_PWM_CTL            0x61254
+#define BLC_PWM_CTL2           0x61250
+#define BLC_PWM_CTL_C          0x62254
+#define BLC_PWM_CTL2_C         0x62250
+#define BACKLIGHT_MODULATION_FREQ_SHIFT                (17)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE                        (1 << 16)
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT     (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK      (0xffff)
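+/*
+ * For example, a modulation frequency field of 0x4000 corresponds to a full
+ * PWM cycle of 2 * 0x4000 = 32768 backlight clock ticks, and a duty-cycle
+ * value of 16384 then gives roughly 50% brightness; the duty-cycle value
+ * must not exceed the full cycle length.
+ */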
+
+#define I915_GCFGC                     0xf0
+#define I915_LOW_FREQUENCY_ENABLE      (1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ     (4 << 4)
+#define I915_DISPLAY_CLOCK_MASK                (7 << 4)
+
+#define I855_HPLLCC                    0xc0
+#define I855_CLOCK_CONTROL_MASK                (3 << 0)
+#define I855_CLOCK_133_200             (0 << 0)
+#define I855_CLOCK_100_200             (1 << 0)
+#define I855_CLOCK_100_133             (2 << 0)
+#define I855_CLOCK_166_250             (3 << 0)
+
+/* I830 CRTC registers */
+#define HTOTAL_A               0x60000
+#define HBLANK_A               0x60004
+#define HSYNC_A                        0x60008
+#define VTOTAL_A               0x6000c
+#define VBLANK_A               0x60010
+#define VSYNC_A                        0x60014
+#define PIPEASRC               0x6001c
+#define BCLRPAT_A              0x60020
+#define VSYNCSHIFT_A           0x60028
+
+#define HTOTAL_B               0x61000
+#define HBLANK_B               0x61004
+#define HSYNC_B                        0x61008
+#define VTOTAL_B               0x6100c
+#define VBLANK_B               0x61010
+#define VSYNC_B                        0x61014
+#define PIPEBSRC               0x6101c
+#define BCLRPAT_B              0x61020
+#define VSYNCSHIFT_B           0x61028
+
+#define HTOTAL_C               0x62000
+#define HBLANK_C               0x62004
+#define HSYNC_C                        0x62008
+#define VTOTAL_C               0x6200c
+#define VBLANK_C               0x62010
+#define VSYNC_C                        0x62014
+#define PIPECSRC               0x6201c
+#define BCLRPAT_C              0x62020
+#define VSYNCSHIFT_C           0x62028
+
+#define PP_STATUS              0x61200
+# define PP_ON                         (1 << 31)
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define PP_READY                       (1 << 30)
+#define PP_SEQUENCE_NONE               (0 << 28)
+#define PP_SEQUENCE_ON                 (1 << 28)
+#define PP_SEQUENCE_OFF                        (2 << 28)
+#define PP_SEQUENCE_MASK               0x30000000
+#define PP_CONTROL             0x61204
+#define POWER_TARGET_ON                        (1 << 0)
+
+#define LVDSPP_ON              0x61208
+#define LVDSPP_OFF             0x6120c
+#define PP_CYCLE               0x61210
+
+#define PFIT_CONTROL           0x61230
+#define PFIT_ENABLE                    (1 << 31)
+#define PFIT_PIPE_MASK                 (3 << 29)
+#define PFIT_PIPE_SHIFT                        29
+#define PFIT_SCALING_MODE_PILLARBOX    (1 << 27)
+#define PFIT_SCALING_MODE_LETTERBOX    (3 << 26)
+#define VERT_INTERP_DISABLE            (0 << 10)
+#define VERT_INTERP_BILINEAR           (1 << 10)
+#define VERT_INTERP_MASK               (3 << 10)
+#define VERT_AUTO_SCALE                        (1 << 9)
+#define HORIZ_INTERP_DISABLE           (0 << 6)
+#define HORIZ_INTERP_BILINEAR          (1 << 6)
+#define HORIZ_INTERP_MASK              (3 << 6)
+#define HORIZ_AUTO_SCALE               (1 << 5)
+#define PANEL_8TO6_DITHER_ENABLE       (1 << 3)
+
+#define PFIT_PGM_RATIOS                0x61234
+#define PFIT_VERT_SCALE_MASK                   0xfff00000
+#define PFIT_HORIZ_SCALE_MASK                  0x0000fff0
+
+#define PFIT_AUTO_RATIOS       0x61238
+
+#define DPLL_A                 0x06014
+#define DPLL_B                 0x06018
+#define DPLL_VCO_ENABLE                        (1 << 31)
+#define DPLL_DVO_HIGH_SPEED            (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE           (1 << 29)
+#define DPLL_VGA_MODE_DIS              (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL          (1 << 26)       /* i915 */
+#define DPLLB_MODE_LVDS                        (2 << 26)       /* i915 */
+#define DPLL_MODE_MASK                 (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10        (0 << 24)       /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24)       /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14     (0 << 24)       /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7      (1 << 24)       /* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK         0x03000000      /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK    0x00ff0000      /* i915 */
+#define DPLL_LOCK                      (1 << 15)       /* CDV */
+
+/*
+ *  The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830      0x001f0000
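+/* For example, a field value of 4 here gives p1 = 2 + 4 = 6. */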
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS  0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT   16
+#define PLL_P2_DIVIDE_BY_4             (1 << 23)       /* i830, required
+                                                        * in DVO non-gang */
+# define PLL_P1_DIVIDE_BY_TWO          (1 << 21)       /* i830 */
+#define PLL_REF_INPUT_DREFCLK          (0 << 13)
+#define PLL_REF_INPUT_TVCLKINA         (1 << 13)       /* i830 */
+#define PLL_REF_INPUT_TVCLKINBC                (2 << 13)       /* SDVO
+                                                                * TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN        (3 << 13)
+#define PLL_REF_INPUT_MASK             (3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT     9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK      (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1       (1 << 8)
+
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_MULTIPLIER_MASK           0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES    4
+#define SDVO_MULTIPLIER_SHIFT_VGA      0
+
+/*
+ * PLL_MD
+ */
+/* Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD              0x0601c
+/* Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD              0x06020
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK       0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT      24
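+/* SDVO's one pixel per packet is therefore programmed as a divider of 0. */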
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK   0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT  16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many of the bytes are fill bytes.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.30 GHz.  The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK    0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT   8
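+/*
+ * For example, a 2x SDVO multiplier is programmed into this field as
+ * (2 - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT.
+ */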
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This is best left at the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK        0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+
+#define DPLL_TEST              0x606c
+#define DPLLB_TEST_SDVO_DIV_1          (0 << 22)
+#define DPLLB_TEST_SDVO_DIV_2          (1 << 22)
+#define DPLLB_TEST_SDVO_DIV_4          (2 << 22)
+#define DPLLB_TEST_SDVO_DIV_MASK       (3 << 22)
+#define DPLLB_TEST_N_BYPASS            (1 << 19)
+#define DPLLB_TEST_M_BYPASS            (1 << 18)
+#define DPLLB_INPUT_BUFFER_ENABLE      (1 << 16)
+#define DPLLA_TEST_N_BYPASS            (1 << 3)
+#define DPLLA_TEST_M_BYPASS            (1 << 2)
+#define DPLLA_INPUT_BUFFER_ENABLE      (1 << 0)
+
+#define ADPA                   0x61100
+#define ADPA_DAC_ENABLE                        (1 << 31)
+#define ADPA_DAC_DISABLE               0
+#define ADPA_PIPE_SELECT_MASK          (1 << 30)
+#define ADPA_PIPE_A_SELECT             0
+#define ADPA_PIPE_B_SELECT             (1 << 30)
+#define ADPA_USE_VGA_HVPOLARITY                (1 << 15)
+#define ADPA_SETS_HVPOLARITY           0
+#define ADPA_VSYNC_CNTL_DISABLE                (1 << 11)
+#define ADPA_VSYNC_CNTL_ENABLE         0
+#define ADPA_HSYNC_CNTL_DISABLE                (1 << 10)
+#define ADPA_HSYNC_CNTL_ENABLE         0
+#define ADPA_VSYNC_ACTIVE_HIGH         (1 << 4)
+#define ADPA_VSYNC_ACTIVE_LOW          0
+#define ADPA_HSYNC_ACTIVE_HIGH         (1 << 3)
+#define ADPA_HSYNC_ACTIVE_LOW          0
+
+#define FPA0                   0x06040
+#define FPA1                   0x06044
+#define FPB0                   0x06048
+#define FPB1                   0x0604c
+#define FP_N_DIV_MASK                  0x003f0000
+#define FP_N_DIV_SHIFT                 16
+#define FP_M1_DIV_MASK                 0x00003f00
+#define FP_M1_DIV_SHIFT                        8
+#define FP_M2_DIV_MASK                 0x0000003f
+#define FP_M2_DIV_SHIFT                        0
+
+#define PORT_HOTPLUG_EN                0x61110
+#define SDVOB_HOTPLUG_INT_EN           (1 << 26)
+#define SDVOC_HOTPLUG_INT_EN           (1 << 25)
+#define TV_HOTPLUG_INT_EN              (1 << 18)
+#define CRT_HOTPLUG_INT_EN             (1 << 9)
+#define CRT_HOTPLUG_FORCE_DETECT       (1 << 3)
+/* CDV.. */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64       (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M             (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M             (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40         (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50         (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60         (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70         (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK       (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G            (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G            (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV       (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV       (1 << 2)
+#define CRT_HOTPLUG_DETECT_MASK                        0x000000F8
+
+#define PORT_HOTPLUG_STAT      0x61114
+#define CRT_HOTPLUG_INT_STATUS         (1 << 11)
+#define TV_HOTPLUG_INT_STATUS          (1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK       (3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR      (3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO       (2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE       (0 << 8)
+#define SDVOC_HOTPLUG_INT_STATUS       (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS       (1 << 6)
+
+#define SDVOB                  0x61140
+#define SDVOC                  0x61160
+#define SDVO_ENABLE                    (1 << 31)
+#define SDVO_PIPE_B_SELECT             (1 << 30)
+#define SDVO_STALL_SELECT              (1 << 29)
+#define SDVO_INTERRUPT_ENABLE          (1 << 26)
+#define SDVO_COLOR_RANGE_16_235                (1 << 8)
+#define SDVO_AUDIO_ENABLE              (1 << 6)
+
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK                (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT       23
+#define SDVO_PHASE_SELECT_MASK         (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT      (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT       (1 << 18)
+#define SDVOC_GANG_MODE                        (1 << 16)
+#define SDVO_BORDER_ENABLE             (1 << 7)
+#define SDVOB_PCIE_CONCURRENCY         (1 << 3)
+#define SDVO_DETECTED                  (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK            ((1 << 17) | (1 << 16) | (1 << 14))
+#define SDVOC_PRESERVE_MASK            (1 << 17)
+
+/*
+ * This register controls the LVDS output enable, pipe selection, and data
+ * format selection.
+ *
+ * All of the clock/data pairs are force powered down by power sequencing.
+ */
+#define LVDS                   0x61180
+/*
+ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN                   (1 << 31)
+/* Selects pipe B for LVDS data.  Must be set on pre-965. */
+#define LVDS_PIPEB_SELECT              (1 << 30)
+
+/* Turns on border drawing to allow centered display. */
+#define LVDS_BORDER_EN                 (1 << 15)
+
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK      (3 << 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN      (0 << 8)
+#define LVDS_A0A2_CLKA_POWER_UP                (3 << 8)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK             (3 << 6)
+#define LVDS_A3_POWER_DOWN             (0 << 6)
+#define LVDS_A3_POWER_UP               (3 << 6)
+/*
+ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK           (3 << 4)
+#define LVDS_CLKB_POWER_DOWN           (0 << 4)
+#define LVDS_CLKB_POWER_UP             (3 << 4)
+/*
+ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode.  The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK           (3 << 2)
+#define LVDS_B0B3_POWER_DOWN           (0 << 2)
+#define LVDS_B0B3_POWER_UP             (3 << 2)
+
+#define PIPEACONF              0x70008
+#define PIPEACONF_ENABLE               (1 << 31)
+#define PIPEACONF_DISABLE              0
+#define PIPEACONF_DOUBLE_WIDE          (1 << 30)
+#define PIPECONF_ACTIVE                        (1 << 30)
+#define I965_PIPECONF_ACTIVE           (1 << 30)
+#define PIPECONF_DSIPLL_LOCK           (1 << 29)
+#define PIPEACONF_SINGLE_WIDE          0
+#define PIPEACONF_PIPE_UNLOCKED                0
+#define PIPEACONF_DSR                  (1 << 26)
+#define PIPEACONF_PIPE_LOCKED          (1 << 25)
+#define PIPEACONF_PALETTE              0
+#define PIPECONF_FORCE_BORDER          (1 << 25)
+#define PIPEACONF_GAMMA                        (1 << 24)
+#define PIPECONF_PROGRESSIVE           (0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION  (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY                (7 << 21)
+#define PIPECONF_PLANE_OFF             (1 << 19)
+#define PIPECONF_CURSOR_OFF            (1 << 18)
+
+#define PIPEBCONF              0x71008
+#define PIPEBCONF_ENABLE               (1 << 31)
+#define PIPEBCONF_DISABLE              0
+#define PIPEBCONF_DOUBLE_WIDE          (1 << 30)
+#define PIPEBCONF_DISABLE              0
+#define PIPEBCONF_GAMMA                        (1 << 24)
+#define PIPEBCONF_PALETTE              0
+
+#define PIPECCONF              0x72008
+
+#define PIPEBGCMAXRED          0x71010
+#define PIPEBGCMAXGREEN                0x71014
+#define PIPEBGCMAXBLUE         0x71018
+
+#define PIPEASTAT              0x70024
+#define PIPEBSTAT              0x71024
+#define PIPECSTAT              0x72024
+#define PIPE_VBLANK_INTERRUPT_STATUS           (1UL << 1)
+#define PIPE_START_VBLANK_INTERRUPT_STATUS     (1UL << 2)
+#define PIPE_VBLANK_CLEAR                      (1 << 1)
+#define PIPE_VBLANK_STATUS                     (1 << 1)
+#define PIPE_TE_STATUS                         (1UL << 6)
+#define PIPE_DPST_EVENT_STATUS                 (1UL << 7)
+#define PIPE_VSYNC_CLEAR                       (1UL << 9)
+#define PIPE_VSYNC_STATUS                      (1UL << 9)
+#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS                (1UL << 10)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS     (1UL << 11)
+#define PIPE_VBLANK_INTERRUPT_ENABLE           (1UL << 17)
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE     (1UL << 18)
+#define PIPE_TE_ENABLE                         (1UL << 22)
+#define PIPE_DPST_EVENT_ENABLE                 (1UL << 23)
+#define PIPE_VSYNC_ENABL                       (1UL << 25)
+#define PIPE_HDMI_AUDIO_UNDERRUN               (1UL << 26)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE            (1UL << 27)
+#define PIPE_HDMI_AUDIO_INT_MASK               (PIPE_HDMI_AUDIO_UNDERRUN | \
+                                               PIPE_HDMI_AUDIO_BUFFER_DONE)
+#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
+#define PIPE_VBLANK_MASK ((1 << 25)|(1 << 24)|(1 << 18)|(1 << 17))
+#define HISTOGRAM_INT_CONTROL          0x61268
+#define HISTOGRAM_BIN_DATA             0x61264
+#define HISTOGRAM_LOGIC_CONTROL                0x61260
+#define PWM_CONTROL_LOGIC              0x61250
+#define PIPE_HOTPLUG_INTERRUPT_STATUS          (1UL << 10)
+#define HISTOGRAM_INTERRUPT_ENABLE             (1UL << 31)
+#define HISTOGRAM_LOGIC_ENABLE                 (1UL << 31)
+#define PWM_LOGIC_ENABLE                       (1UL << 31)
+#define PWM_PHASEIN_ENABLE                     (1UL << 25)
+#define PWM_PHASEIN_INT_ENABLE                 (1UL << 24)
+#define PWM_PHASEIN_VB_COUNT                   0x00001f00
+#define PWM_PHASEIN_INC                                0x0000001f
+#define HISTOGRAM_INT_CTRL_CLEAR               (1UL << 30)
+#define DPST_YUV_LUMA_MODE                     0
+
+struct dpst_ie_histogram_control {
+       union {
+               uint32_t data;
+               struct {
+                       uint32_t bin_reg_index:7;
+                       uint32_t reserved:4;
+                       uint32_t bin_reg_func_select:1;
+                       uint32_t sync_to_phase_in:1;
+                       uint32_t alt_enhancement_mode:2;
+                       uint32_t reserved1:1;
+                       uint32_t sync_to_phase_in_count:8;
+                       uint32_t histogram_mode_select:1;
+                       uint32_t reserved2:4;
+                       uint32_t ie_pipe_assignment:1;
+                       uint32_t ie_mode_table_enabled:1;
+                       uint32_t ie_histogram_enable:1;
+               };
+       };
+};
+
+struct dpst_guardband {
+       union {
+               uint32_t data;
+               struct {
+                       uint32_t guardband:22;
+                       uint32_t guardband_interrupt_delay:8;
+                       uint32_t interrupt_status:1;
+                       uint32_t interrupt_enable:1;
+               };
+       };
+};
+
+#define PIPEAFRAMEHIGH         0x70040
+#define PIPEAFRAMEPIXEL                0x70044
+#define PIPEBFRAMEHIGH         0x71040
+#define PIPEBFRAMEPIXEL                0x71044
+#define PIPECFRAMEHIGH         0x72040
+#define PIPECFRAMEPIXEL                0x72044
+#define PIPE_FRAME_HIGH_MASK   0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT  0
+#define PIPE_FRAME_LOW_MASK    0xff000000
+#define PIPE_FRAME_LOW_SHIFT   24
+#define PIPE_PIXEL_MASK                0x00ffffff
+#define PIPE_PIXEL_SHIFT       0
+
+#define DSPARB                 0x70030
+#define DSPFW1                 0x70034
+#define DSPFW2                 0x70038
+#define DSPFW3                 0x7003c
+#define DSPFW4                 0x70050
+#define DSPFW5                 0x70054
+#define DSPFW6                 0x70058
+#define DSPCHICKENBIT          0x70400
+#define DSPACNTR               0x70180
+#define DSPBCNTR               0x71180
+#define DSPCCNTR               0x72180
+#define DISPLAY_PLANE_ENABLE                   (1 << 31)
+#define DISPLAY_PLANE_DISABLE                  0
+#define DISPPLANE_GAMMA_ENABLE                 (1 << 30)
+#define DISPPLANE_GAMMA_DISABLE                        0
+#define DISPPLANE_PIXFORMAT_MASK               (0xf << 26)
+#define DISPPLANE_8BPP                         (0x2 << 26)
+#define DISPPLANE_15_16BPP                     (0x4 << 26)
+#define DISPPLANE_16BPP                                (0x5 << 26)
+#define DISPPLANE_32BPP_NO_ALPHA               (0x6 << 26)
+#define DISPPLANE_32BPP                                (0x7 << 26)
+#define DISPPLANE_STEREO_ENABLE                        (1 << 25)
+#define DISPPLANE_STEREO_DISABLE               0
+#define DISPPLANE_SEL_PIPE_MASK                        (1 << 24)
+#define DISPPLANE_SEL_PIPE_POS                 24
+#define DISPPLANE_SEL_PIPE_A                   0
+#define DISPPLANE_SEL_PIPE_B                   (1 << 24)
+#define DISPPLANE_SRC_KEY_ENABLE               (1 << 22)
+#define DISPPLANE_SRC_KEY_DISABLE              0
+#define DISPPLANE_LINE_DOUBLE                  (1 << 20)
+#define DISPPLANE_NO_LINE_DOUBLE               0
+#define DISPPLANE_STEREO_POLARITY_FIRST                0
+#define DISPPLANE_STEREO_POLARITY_SECOND       (1 << 18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE           (1 << 15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE          0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA                0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY         (1)
+#define DISPPLANE_BOTTOM                       (4)
+
+#define DSPABASE               0x70184
+#define DSPALINOFF             0x70184
+#define DSPASTRIDE             0x70188
+
+#define DSPBBASE               0x71184
+#define DSPBLINOFF             0x71184
+#define DSPBADDR               DSPBBASE
+#define DSPBSTRIDE             0x71188
+
+#define DSPCBASE               0x72184
+#define DSPCLINOFF             0x72184
+#define DSPCSTRIDE             0x72188
+
+#define DSPAKEYVAL             0x70194
+#define DSPAKEYMASK            0x70198
+
+#define DSPAPOS                        0x7018C /* reserved */
+#define DSPASIZE               0x70190
+#define DSPBPOS                        0x7118C
+#define DSPBSIZE               0x71190
+#define DSPCPOS                        0x7218C
+#define DSPCSIZE               0x72190
+
+#define DSPASURF               0x7019C
+#define DSPATILEOFF            0x701A4
+
+#define DSPBSURF               0x7119C
+#define DSPBTILEOFF            0x711A4
+
+#define DSPCSURF               0x7219C
+#define DSPCTILEOFF            0x721A4
+#define DSPCKEYMAXVAL          0x721A0
+#define DSPCKEYMINVAL          0x72194
+#define DSPCKEYMSK             0x72198
+
+#define VGACNTRL               0x71400
+#define VGA_DISP_DISABLE               (1 << 31)
+#define VGA_2X_MODE                    (1 << 30)
+#define VGA_PIPE_B_SELECT              (1 << 29)
+
+/*
+ * Overlay registers
+ */
+#define OV_C_OFFSET            0x08000
+#define OV_OVADD               0x30000
+#define OV_DOVASTA             0x30008
+# define OV_PIPE_SELECT                        ((1 << 6)|(1 << 7))
+# define OV_PIPE_SELECT_POS            6
+# define OV_PIPE_A                     0
+# define OV_PIPE_C                     1
+#define OV_OGAMC5              0x30010
+#define OV_OGAMC4              0x30014
+#define OV_OGAMC3              0x30018
+#define OV_OGAMC2              0x3001C
+#define OV_OGAMC1              0x30020
+#define OV_OGAMC0              0x30024
+#define OVC_OVADD              0x38000
+#define OVC_DOVCSTA            0x38008
+#define OVC_OGAMC5             0x38010
+#define OVC_OGAMC4             0x38014
+#define OVC_OGAMC3             0x38018
+#define OVC_OGAMC2             0x3801C
+#define OVC_OGAMC1             0x38020
+#define OVC_OGAMC0             0x38024
+
+/*
+ * Some BIOS scratch area registers.  The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+#define SWF0                   0x71410
+#define SWF1                   0x71414
+#define SWF2                   0x71418
+#define SWF3                   0x7141c
+#define SWF4                   0x71420
+#define SWF5                   0x71424
+#define SWF6                   0x71428
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF00                  0x70410
+#define SWF01                  0x70414
+#define SWF02                  0x70418
+#define SWF03                  0x7041c
+#define SWF04                  0x70420
+#define SWF05                  0x70424
+#define SWF06                  0x70428
+
+#define SWF10                  SWF0
+#define SWF11                  SWF1
+#define SWF12                  SWF2
+#define SWF13                  SWF3
+#define SWF14                  SWF4
+#define SWF15                  SWF5
+#define SWF16                  SWF6
+
+#define SWF30                  0x72414
+#define SWF31                  0x72418
+#define SWF32                  0x7241c
+
+
+/*
+ * Palette registers
+ */
+#define PALETTE_A              0x0a000
+#define PALETTE_B              0x0a800
+#define PALETTE_C              0x0ac00
+
+/* Cursor A & B regs */
+#define CURACNTR               0x70080
+#define CURSOR_MODE_DISABLE            0x00
+#define CURSOR_MODE_64_32B_AX          0x07
+#define CURSOR_MODE_64_ARGB_AX         ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_GAMMA_ENABLE           (1 << 26)
+#define CURABASE               0x70084
+#define CURAPOS                        0x70088
+#define CURSOR_POS_MASK                        0x007FF
+#define CURSOR_POS_SIGN                        0x8000
+#define CURSOR_X_SHIFT                 0
+#define CURSOR_Y_SHIFT                 16
+#define CURBCNTR               0x700c0
+#define CURBBASE               0x700c4
+#define CURBPOS                        0x700c8
+#define CURCCNTR               0x700e0
+#define CURCBASE               0x700e4
+#define CURCPOS                        0x700e8
+
+/*
+ * Interrupt Registers
+ */
+#define IER                    0x020a0
+#define IIR                    0x020a4
+#define IMR                    0x020a8
+#define ISR                    0x020ac
+
+/*
+ * MOORESTOWN delta registers
+ */
+#define MRST_DPLL_A            0x0f014
+#define MDFLD_DPLL_B           0x0f018
+#define MDFLD_INPUT_REF_SEL            (1 << 14)
+#define MDFLD_VCO_SEL                  (1 << 16)
+#define DPLLA_MODE_LVDS                        (2 << 26)       /* mrst */
+#define MDFLD_PLL_LATCHEN              (1 << 28)
+#define MDFLD_PWR_GATE_EN              (1 << 30)
+#define MDFLD_P1_MASK                  (0x1FF << 17)
+#define MRST_FPA0              0x0f040
+#define MRST_FPA1              0x0f044
+#define MDFLD_DPLL_DIV0                0x0f048
+#define MDFLD_DPLL_DIV1                0x0f04c
+#define MRST_PERF_MODE         0x020f4
+
+/*
+ * MEDFIELD HDMI registers
+ */
+#define HDMIPHYMISCCTL         0x61134
+#define HDMI_PHY_POWER_DOWN            0x7f
+#define HDMIB_CONTROL          0x61140
+#define HDMIB_PORT_EN                  (1 << 31)
+#define HDMIB_PIPE_B_SELECT            (1 << 30)
+#define HDMIB_NULL_PACKET              (1 << 9)
+#define HDMIB_HDCP_PORT                        (1 << 5)
+
+/* #define LVDS                        0x61180 */
+#define MRST_PANEL_8TO6_DITHER_ENABLE  (1 << 25)
+#define MRST_PANEL_24_DOT_1_FORMAT     (1 << 24)
+#define LVDS_A3_POWER_UP_0_OUTPUT      (1 << 6)
+
+#define MIPI                   0x61190
+#define MIPI_C                 0x62190
+#define MIPI_PORT_EN                   (1 << 31)
+#define SEL_FLOPPED_HSTX               (1 << 23)
+#define PASS_FROM_SPHY_TO_AFE          (1 << 16)
+/* Turns on border drawing to allow centered display. */
+#define MIPI_BORDER_EN                 (1 << 15)
+#define MIPIA_3LANE_MIPIC_1LANE                0x1
+#define MIPIA_2LANE_MIPIC_2LANE                0x2
+#define TE_TRIGGER_DSI_PROTOCOL                (1 << 2)
+#define TE_TRIGGER_GPIO_PIN            (1 << 3)
+#define MIPI_TE_COUNT          0x61194
+
+/* #define PP_CONTROL  0x61204 */
+#define POWER_DOWN_ON_RESET            (1 << 1)
+
+/* #define PFIT_CONTROL        0x61230 */
+#define PFIT_PIPE_SELECT               (3 << 29)
+#define PFIT_PIPE_SELECT_SHIFT         (29)
+
+/* #define BLC_PWM_CTL         0x61254 */
+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT   (16)
+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK    (0xffff << 16)
+
+/* #define PIPEACONF 0x70008 */
+#define PIPEACONF_PIPE_STATE           (1 << 30)
+/* #define DSPACNTR            0x70180 */
+
+#define MRST_DSPABASE          0x7019c
+#define MRST_DSPBBASE          0x7119c
+#define MDFLD_DSPCBASE         0x7219c
+
+/*
+ * Moorestown registers.
+ */
+
+/*
+ *     MIPI IP registers
+ */
+#define MIPIC_REG_OFFSET               0x800
+
+#define DEVICE_READY_REG               0xb000
+#define LP_OUTPUT_HOLD                         (1 << 16)
+#define EXIT_ULPS_DEV_READY                    0x3
+#define LP_OUTPUT_HOLD_RELEASE                 0x810000
+# define ENTERING_ULPS                         (2 << 1)
+# define EXITING_ULPS                          (1 << 1)
+# define ULPS_MASK                             (3 << 1)
+# define BUS_POSSESSION                                (1 << 3)
+#define INTR_STAT_REG                  0xb004
+#define RX_SOT_ERROR                           (1 << 0)
+#define RX_SOT_SYNC_ERROR                      (1 << 1)
+#define RX_ESCAPE_MODE_ENTRY_ERROR             (1 << 3)
+#define RX_LP_TX_SYNC_ERROR                    (1 << 4)
+#define RX_HS_RECEIVE_TIMEOUT_ERROR            (1 << 5)
+#define RX_FALSE_CONTROL_ERROR                 (1 << 6)
+#define RX_ECC_SINGLE_BIT_ERROR                        (1 << 7)
+#define RX_ECC_MULTI_BIT_ERROR                 (1 << 8)
+#define RX_CHECKSUM_ERROR                      (1 << 9)
+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED                (1 << 10)
+#define RX_DSI_VC_ID_INVALID                   (1 << 11)
+#define TX_FALSE_CONTROL_ERROR                 (1 << 12)
+#define TX_ECC_SINGLE_BIT_ERROR                        (1 << 13)
+#define TX_ECC_MULTI_BIT_ERROR                 (1 << 14)
+#define TX_CHECKSUM_ERROR                      (1 << 15)
+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED                (1 << 16)
+#define TX_DSI_VC_ID_INVALID                   (1 << 17)
+#define HIGH_CONTENTION                                (1 << 18)
+#define LOW_CONTENTION                         (1 << 19)
+#define DPI_FIFO_UNDER_RUN                     (1 << 20)
+#define HS_TX_TIMEOUT                          (1 << 21)
+#define LP_RX_TIMEOUT                          (1 << 22)
+#define TURN_AROUND_ACK_TIMEOUT                        (1 << 23)
+#define ACK_WITH_NO_ERROR                      (1 << 24)
+#define HS_GENERIC_WR_FIFO_FULL                        (1 << 27)
+#define LP_GENERIC_WR_FIFO_FULL                        (1 << 28)
+#define SPL_PKT_SENT                           (1 << 30)
+#define INTR_EN_REG                    0xb008
+#define DSI_FUNC_PRG_REG               0xb00c
+#define DPI_CHANNEL_NUMBER_POS                 0x03
+#define DBI_CHANNEL_NUMBER_POS                 0x05
+#define FMT_DPI_POS                            0x07
+#define FMT_DBI_POS                            0x0A
+#define DBI_DATA_WIDTH_POS                     0x0D
+
+/* DPI PIXEL FORMATS */
+#define RGB_565_FMT                            0x01    /* RGB 565 FORMAT */
+#define RGB_666_FMT                            0x02    /* RGB 666 FORMAT */
+#define LRGB_666_FMT                           0x03    /* RGB LOOSELY PACKED
+                                                        * 666 FORMAT
+                                                        */
+#define RGB_888_FMT                            0x04    /* RGB 888 FORMAT */
+#define VIRTUAL_CHANNEL_NUMBER_0               0x00    /* Virtual channel 0 */
+#define VIRTUAL_CHANNEL_NUMBER_1               0x01    /* Virtual channel 1 */
+#define VIRTUAL_CHANNEL_NUMBER_2               0x02    /* Virtual channel 2 */
+#define VIRTUAL_CHANNEL_NUMBER_3               0x03    /* Virtual channel 3 */
+
+#define DBI_NOT_SUPPORTED                      0x00    /* command mode
+                                                        * is not supported
+                                                        */
+#define DBI_DATA_WIDTH_16BIT                   0x01    /* 16 bit data */
+#define DBI_DATA_WIDTH_9BIT                    0x02    /* 9 bit data */
+#define DBI_DATA_WIDTH_8BIT                    0x03    /* 8 bit data */
+#define DBI_DATA_WIDTH_OPT1                    0x04    /* option 1 */
+#define DBI_DATA_WIDTH_OPT2                    0x05    /* option 2 */
+
+#define HS_TX_TIMEOUT_REG              0xb010
+#define LP_RX_TIMEOUT_REG              0xb014
+#define TURN_AROUND_TIMEOUT_REG                0xb018
+#define DEVICE_RESET_REG               0xb01C
+#define DPI_RESOLUTION_REG             0xb020
+#define RES_V_POS                              0x10
+#define DBI_RESOLUTION_REG             0xb024 /* Reserved for MDFLD */
+#define HORIZ_SYNC_PAD_COUNT_REG       0xb028
+#define HORIZ_BACK_PORCH_COUNT_REG     0xb02C
+#define HORIZ_FRONT_PORCH_COUNT_REG    0xb030
+#define HORIZ_ACTIVE_AREA_COUNT_REG    0xb034
+#define VERT_SYNC_PAD_COUNT_REG                0xb038
+#define VERT_BACK_PORCH_COUNT_REG      0xb03c
+#define VERT_FRONT_PORCH_COUNT_REG     0xb040
+#define HIGH_LOW_SWITCH_COUNT_REG      0xb044
+#define DPI_CONTROL_REG                        0xb048
+#define DPI_SHUT_DOWN                          (1 << 0)
+#define DPI_TURN_ON                            (1 << 1)
+#define DPI_COLOR_MODE_ON                      (1 << 2)
+#define DPI_COLOR_MODE_OFF                     (1 << 3)
+#define DPI_BACK_LIGHT_ON                      (1 << 4)
+#define DPI_BACK_LIGHT_OFF                     (1 << 5)
+#define DPI_LP                                 (1 << 6)
+#define DPI_DATA_REG                   0xb04c
+#define DPI_BACK_LIGHT_ON_DATA                 0x07
+#define DPI_BACK_LIGHT_OFF_DATA                        0x17
+#define INIT_COUNT_REG                 0xb050
+#define MAX_RET_PAK_REG                        0xb054
+#define VIDEO_FMT_REG                  0xb058
+#define COMPLETE_LAST_PCKT                     (1 << 2)
+#define EOT_DISABLE_REG                        0xb05c
+#define ENABLE_CLOCK_STOPPING                  (1 << 1)
+#define LP_BYTECLK_REG                 0xb060
+#define LP_GEN_DATA_REG                        0xb064
+#define HS_GEN_DATA_REG                        0xb068
+#define LP_GEN_CTRL_REG                        0xb06C
+#define HS_GEN_CTRL_REG                        0xb070
+#define DCS_CHANNEL_NUMBER_POS         0x6
+#define MCS_COMMANDS_POS               0x8
+#define WORD_COUNTS_POS                        0x8
+#define MCS_PARAMETER_POS                      0x10
+#define GEN_FIFO_STAT_REG              0xb074
+#define HS_DATA_FIFO_FULL                      (1 << 0)
+#define HS_DATA_FIFO_HALF_EMPTY                        (1 << 1)
+#define HS_DATA_FIFO_EMPTY                     (1 << 2)
+#define LP_DATA_FIFO_FULL                      (1 << 8)
+#define LP_DATA_FIFO_HALF_EMPTY                        (1 << 9)
+#define LP_DATA_FIFO_EMPTY                     (1 << 10)
+#define HS_CTRL_FIFO_FULL                      (1 << 16)
+#define HS_CTRL_FIFO_HALF_EMPTY                        (1 << 17)
+#define HS_CTRL_FIFO_EMPTY                     (1 << 18)
+#define LP_CTRL_FIFO_FULL                      (1 << 24)
+#define LP_CTRL_FIFO_HALF_EMPTY                        (1 << 25)
+#define LP_CTRL_FIFO_EMPTY                     (1 << 26)
+#define DBI_FIFO_EMPTY                         (1 << 27)
+#define DPI_FIFO_EMPTY                         (1 << 28)
+#define HS_LS_DBI_ENABLE_REG           0xb078
+#define TXCLKESC_REG                   0xb07c
+#define DPHY_PARAM_REG                 0xb080
+#define DBI_BW_CTRL_REG                        0xb084
+#define CLK_LANE_SWT_REG               0xb088
+
+/*
+ * MIPI Adapter registers
+ */
+#define MIPI_CONTROL_REG               0xb104
+#define MIPI_2X_CLOCK_BITS                     ((1 << 0) | (1 << 1))
+#define MIPI_DATA_ADDRESS_REG          0xb108
+#define MIPI_DATA_LENGTH_REG           0xb10C
+#define MIPI_COMMAND_ADDRESS_REG       0xb110
+#define MIPI_COMMAND_LENGTH_REG                0xb114
+#define MIPI_READ_DATA_RETURN_REG0     0xb118
+#define MIPI_READ_DATA_RETURN_REG1     0xb11C
+#define MIPI_READ_DATA_RETURN_REG2     0xb120
+#define MIPI_READ_DATA_RETURN_REG3     0xb124
+#define MIPI_READ_DATA_RETURN_REG4     0xb128
+#define MIPI_READ_DATA_RETURN_REG5     0xb12C
+#define MIPI_READ_DATA_RETURN_REG6     0xb130
+#define MIPI_READ_DATA_RETURN_REG7     0xb134
+#define MIPI_READ_DATA_VALID_REG       0xb138
+
+/* DBI COMMANDS */
+#define soft_reset                     0x01
+/*
+ *     The display module performs a software reset.
+ *     Registers are written with their SW Reset default values.
+ */
+#define get_power_mode                 0x0a
+/*
+ *     The display module returns the current power mode.
+ */
+#define get_address_mode               0x0b
+/*
+ *     The display module returns the current address mode.
+ */
+#define get_pixel_format               0x0c
+/*
+ *     This command gets the pixel format for the RGB image data
+ *     used by the interface.
+ */
+#define get_display_mode               0x0d
+/*
+ *     The display module returns the Display Image Mode status.
+ */
+#define get_signal_mode                        0x0e
+/*
+ *     The display module returns the Display Signal Mode.
+ */
+#define get_diagnostic_result          0x0f
+/*
+ *     The display module returns the self-diagnostic results following
+ *     a Sleep Out command.
+ */
+#define enter_sleep_mode               0x10
+/*
+ *     This command causes the display module to enter the Sleep mode.
+ *     In this mode, all unnecessary blocks inside the display module are
+ *     disabled except interface communication. This is the lowest power
+ *     mode the display module supports.
+ */
+#define exit_sleep_mode                        0x11
+/*
+ *     This command causes the display module to exit Sleep mode.
+ *     All blocks inside the display module are enabled.
+ */
+#define enter_partial_mode             0x12
+/*
+ *     This command causes the display module to enter the Partial Display
+ *     Mode. The Partial Display Mode window is described by the
+ *     set_partial_area command.
+ */
+#define enter_normal_mode              0x13
+/*
+ *     This command causes the display module to enter the Normal mode.
+ *     Normal Mode is defined as Partial Display mode and Scroll mode both off.
+ */
+#define exit_invert_mode               0x20
+/*
+ *     This command causes the display module to stop inverting the image
+ *     data on the display device. The frame memory contents remain unchanged.
+ *     No status bits are changed.
+ */
+#define enter_invert_mode              0x21
+/*
+ *     This command causes the display module to invert the image data only on
+ *     the display device. The frame memory contents remain unchanged.
+ *     No status bits are changed.
+ */
+#define set_gamma_curve                        0x26
+/*
+ *     This command selects the desired gamma curve for the display device.
+ *     Four fixed gamma curves are defined in the DCS specification.
+ */
+#define set_display_off                        0x28
+/*
+ *     This command causes the display module to stop displaying the image
+ *     data on the display device. The frame memory contents remain
+ *     unchanged. No status bits are changed.
+ */
+#define set_display_on                 0x29
+/*
+ *     This command causes the display module to start displaying the image
+ *     data on the display device. The frame memory contents remain
+ *     unchanged. No status bits are changed.
+ */
+#define set_column_address             0x2a
+/*
+ *     This command defines the column extent of the frame memory accessed by
+ *     the host processor with the read_memory_continue and
+ *     write_memory_continue commands.
+ *     No status bits are changed.
+ */
+#define set_page_addr                  0x2b
+/*
+ *     This command defines the page extent of the frame memory accessed by
+ *     the host processor with the write_memory_continue and
+ *     read_memory_continue commands.
+ *     No status bits are changed.
+ */
+#define write_mem_start                        0x2c
+/*
+ *     This command transfers image data from the host processor to the
+ *     display module's frame memory starting at the pixel location specified
+ *     by preceding set_column_address and set_page_address commands.
+ */
+#define set_partial_area               0x30
+/*
+ *     This command defines the Partial Display mode's display area.
+ *     There are two parameters associated with this command, the first
+ *     defines the Start Row (SR) and the second the End Row (ER). SR and ER
+ *     refer to the Frame Memory Line Pointer.
+ */
+#define set_scroll_area                        0x33
+/*
+ *     This command defines the display module's Vertical Scrolling Area.
+ */
+#define set_tear_off                   0x34
+/*
+ *     This command turns off the display module's Tearing Effect output
+ *     signal on the TE signal line.
+ */
+#define set_tear_on                    0x35
+/*
+ *     This command turns on the display module's Tearing Effect output signal
+ *     on the TE signal line.
+ */
+#define set_address_mode               0x36
+/*
+ *     This command sets the data order for transfers from the host processor
+ *     to the display module's frame memory, bits B[7:5] and B3, and from the
+ *     display module's frame memory to the display device, bits B[2:0] and B4.
+ */
+#define set_scroll_start               0x37
+/*
+ *     This command sets the start of the vertical scrolling area in the frame
+ *     memory. The vertical scrolling area is fully defined when this command
+ *     is used with the set_scroll_area command. The set_scroll_start command
+ *     has one parameter, the Vertical Scroll Pointer. The VSP defines the
+ *     line in the frame memory that is written to the display device as the
+ *     first line of the vertical scroll area.
+ */
+#define exit_idle_mode                 0x38
+/*
+ *     This command causes the display module to exit Idle mode.
+ */
+#define enter_idle_mode                        0x39
+/*
+ *     This command causes the display module to enter Idle Mode.
+ *     In Idle Mode, color expression is reduced. Colors are shown on the
+ *     display device using the MSB of each of the R, G and B color
+ *     components in the frame memory.
+ */
+#define set_pixel_format               0x3a
+/*
+ *     This command sets the pixel format for the RGB image data used by the
+ *     interface.
+ *     Bits D[6:4]  DPI Pixel Format Definition
+ *     Bits D[2:0]  DBI Pixel Format Definition
+ *     Bits D7 and D3 are not used.
+ */
+#define DCS_PIXEL_FORMAT_3bpp          0x1
+#define DCS_PIXEL_FORMAT_8bpp          0x2
+#define DCS_PIXEL_FORMAT_12bpp         0x3
+#define DCS_PIXEL_FORMAT_16bpp         0x5
+#define DCS_PIXEL_FORMAT_18bpp         0x6
+#define DCS_PIXEL_FORMAT_24bpp         0x7
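+/*
+ * Illustrative note (not part of the original header): per the
+ * set_pixel_format layout above, the DPI format goes in bits D[6:4] and
+ * the DBI format in bits D[2:0], so for example
+ * (DCS_PIXEL_FORMAT_24bpp << 4) | DCS_PIXEL_FORMAT_16bpp == 0x75 selects
+ * 24bpp over DPI and 16bpp over DBI.
+ */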
+
+#define write_mem_cont                 0x3c
+
+/*
+ *     This command transfers image data from the host processor to the
+ *     display module's frame memory continuing from the pixel location
+ *     following the previous write_memory_continue or write_memory_start
+ *     command.
+ */
+#define set_tear_scanline              0x44
+/*
+ *     This command turns on the display module's Tearing Effect output signal
+ *     on the TE signal line when the display module reaches line N.
+ */
+#define get_scanline                   0x45
+/*
+ *     The display module returns the current scanline, N, used to update the
+ *     display device. The total number of scanlines on a display device is
+ *     defined as VSYNC + VBP + VACT + VFP. The first scanline is defined as
+ *     the first line of V Sync and is denoted as Line 0.
+ *     When in Sleep Mode, the value returned by get_scanline is undefined.
+ */
+
+/* MCS or Generic COMMANDS */
+/* MCS/generic data type */
+#define GEN_SHORT_WRITE_0      0x03  /* generic short write, no parameters */
+#define GEN_SHORT_WRITE_1      0x13  /* generic short write, 1 parameter */
+#define GEN_SHORT_WRITE_2      0x23  /* generic short write, 2 parameters */
+#define GEN_READ_0             0x04  /* generic read, no parameters */
+#define GEN_READ_1             0x14  /* generic read, 1 parameter */
+#define GEN_READ_2             0x24  /* generic read, 2 parameters */
+#define GEN_LONG_WRITE         0x29  /* generic long write */
+#define MCS_SHORT_WRITE_0      0x05  /* MCS short write, no parameters */
+#define MCS_SHORT_WRITE_1      0x15  /* MCS short write, 1 parameter */
+#define MCS_READ               0x06  /* MCS read, no parameters */
+#define MCS_LONG_WRITE         0x39  /* MCS long write */
+/* MCS/generic commands */
+/* TPO MCS */
+#define write_display_profile          0x50
+#define write_display_brightness       0x51
+#define write_ctrl_display             0x53
+#define write_ctrl_cabc                        0x55
+  #define UI_IMAGE             0x01
+  #define STILL_IMAGE          0x02
+  #define MOVING_IMAGE         0x03
+#define write_hysteresis               0x57
+#define write_gamma_setting            0x58
+#define write_cabc_min_bright          0x5e
+#define write_kbbc_profile             0x60
+/* TMD MCS */
+#define tmd_write_display_brightness 0x8c
+
+/*
+ *     This command is used to control ambient light, panel backlight
+ *     brightness and gamma settings.
+ */
+#define BRIGHT_CNTL_BLOCK_ON   (1 << 5)
+#define AMBIENT_LIGHT_SENSE_ON (1 << 4)
+#define DISPLAY_DIMMING_ON     (1 << 3)
+#define BACKLIGHT_ON           (1 << 2)
+#define DISPLAY_BRIGHTNESS_AUTO        (1 << 1)
+#define GAMMA_AUTO             (1 << 0)
+
+/* DCS Interface Pixel Formats */
+#define DCS_PIXEL_FORMAT_3BPP  0x1
+#define DCS_PIXEL_FORMAT_8BPP  0x2
+#define DCS_PIXEL_FORMAT_12BPP 0x3
+#define DCS_PIXEL_FORMAT_16BPP 0x5
+#define DCS_PIXEL_FORMAT_18BPP 0x6
+#define DCS_PIXEL_FORMAT_24BPP 0x7
+/* ONE PARAMETER READ DATA */
+#define addr_mode_data         0xfc
+#define diag_res_data          0x00
+#define disp_mode_data         0x23
+#define pxl_fmt_data           0x77
+#define pwr_mode_data          0x74
+#define sig_mode_data          0x00
+/* TWO PARAMETERS READ DATA */
+#define scanline_data1         0xff
+#define scanline_data2         0xff
+#define NON_BURST_MODE_SYNC_PULSE      0x01    /* Non Burst Mode
+                                                * with Sync Pulse
+                                                */
+#define NON_BURST_MODE_SYNC_EVENTS     0x02    /* Non Burst Mode
+                                                * with Sync events
+                                                */
+#define BURST_MODE                     0x03    /* Burst Mode */
+#define DBI_COMMAND_BUFFER_SIZE                0x240   /* Allocate at least
+                                                        * 0x100 Byte with 32
+                                                        * byte alignment
+                                                        */
+#define DBI_DATA_BUFFER_SIZE           0x120   /* Allocate at least
+                                                * 0x100 Byte with 32
+                                                * byte alignment
+                                                */
+#define DBI_CB_TIME_OUT                        0xFFFF
+
+#define GEN_FB_TIME_OUT                        2000
+
+#define SKU_83                         0x01
+#define SKU_100                                0x02
+#define SKU_100L                       0x04
+#define SKU_BYPASS                     0x08
+
+/* Some handy macros for playing with bitfields. */
+#define PSB_MASK(high, low) (((1<<((high)-(low)+1))-1)<<(low))
+#define SET_FIELD(value, field) (((value) << field ## _SHIFT) & field ## _MASK)
+#define GET_FIELD(word, field) (((word)  & field ## _MASK) >> field ## _SHIFT)
+
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
+/* PCI config space */
+
+#define SB_PCKT         0x02100 /* cedarview */
+# define SB_OPCODE_MASK                         PSB_MASK(31, 16)
+# define SB_OPCODE_SHIFT                        16
+# define SB_OPCODE_READ                         0
+# define SB_OPCODE_WRITE                        1
+# define SB_DEST_MASK                           PSB_MASK(15, 8)
+# define SB_DEST_SHIFT                          8
+# define SB_DEST_DPLL                           0x88
+# define SB_BYTE_ENABLE_MASK                    PSB_MASK(7, 4)
+# define SB_BYTE_ENABLE_SHIFT                   4
+# define SB_BUSY                                (1 << 0)
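+/*
+ * Usage sketch (added for illustration, not in the original header): the
+ * SET_FIELD/GET_FIELD helpers above pair a value with its FOO_MASK and
+ * FOO_SHIFT defines, e.g. building and decoding an SB_PCKT value:
+ *
+ *     u32 pckt = SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
+ *                SET_FIELD(SB_DEST_DPLL, SB_DEST);    -> 0x00018800
+ *     u32 op = GET_FIELD(pckt, SB_OPCODE);            -> SB_OPCODE_WRITE
+ */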
+
+
+/* 32-bit value read/written from the DPIO reg. */
+#define SB_DATA                0x02104 /* cedarview */
+/* 32-bit address of the DPIO reg to be read/written. */
+#define SB_ADDR                0x02108 /* cedarview */
+#define DPIO_CFG       0x02110 /* cedarview */
+# define DPIO_MODE_SELECT_1                    (1 << 3)
+# define DPIO_MODE_SELECT_0                    (1 << 2)
+# define DPIO_SFR_BYPASS                       (1 << 1)
+/* reset is active low */
+# define DPIO_CMN_RESET_N                      (1 << 0)
+
+/* Cedarview sideband registers */
+#define _SB_M_A                        0x8008
+#define _SB_M_B                        0x8028
+#define SB_M(pipe) _PIPE(pipe, _SB_M_A, _SB_M_B)
+# define SB_M_DIVIDER_MASK                     (0xFF << 24)
+# define SB_M_DIVIDER_SHIFT                    24
+
+#define _SB_N_VCO_A            0x8014
+#define _SB_N_VCO_B            0x8034
+#define SB_N_VCO(pipe) _PIPE(pipe, _SB_N_VCO_A, _SB_N_VCO_B)
+#define SB_N_VCO_SEL_MASK                      PSB_MASK(31, 30)
+#define SB_N_VCO_SEL_SHIFT                     30
+#define SB_N_DIVIDER_MASK                      PSB_MASK(29, 26)
+#define SB_N_DIVIDER_SHIFT                     26
+#define SB_N_CB_TUNE_MASK                      PSB_MASK(25, 24)
+#define SB_N_CB_TUNE_SHIFT                     24
+
+#define _SB_REF_A              0x8018
+#define _SB_REF_B              0x8038
+#define SB_REF_SFR(pipe)       _PIPE(pipe, _SB_REF_A, _SB_REF_B)
+
+#define _SB_P_A                        0x801c
+#define _SB_P_B                        0x803c
+#define SB_P(pipe) _PIPE(pipe, _SB_P_A, _SB_P_B)
+#define SB_P2_DIVIDER_MASK                     PSB_MASK(31, 30)
+#define SB_P2_DIVIDER_SHIFT                    30
+#define SB_P2_10                               0 /* HDMI, DP, DAC */
+#define SB_P2_5                                1 /* DAC */
+#define SB_P2_14                               2 /* LVDS single */
+#define SB_P2_7                                3 /* LVDS double */
+#define SB_P1_DIVIDER_MASK                     PSB_MASK(15, 12)
+#define SB_P1_DIVIDER_SHIFT                    12
+
+#define PSB_LANE0              0x120
+#define PSB_LANE1              0x220
+#define PSB_LANE2              0x2320
+#define PSB_LANE3              0x2420
+
+#define LANE_PLL_MASK          (0x7 << 20)
+#define LANE_PLL_ENABLE                (0x3 << 20)
+
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
new file mode 100644 (file)
index 0000000..4882b29
--- /dev/null
@@ -0,0 +1,2617 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "psb_intel_drv.h"
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_sdvo_regs.h"
+#include "psb_intel_reg.h"
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK  (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+                         SDVO_TV_MASK)
+
+#define IS_TV(c)       (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c)     (c->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c)     (c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+
+
+static const char *tv_format_names[] = {
+       "NTSC_M"   , "NTSC_J"  , "NTSC_443",
+       "PAL_B"    , "PAL_D"   , "PAL_G"   ,
+       "PAL_H"    , "PAL_I"   , "PAL_M"   ,
+       "PAL_N"    , "PAL_NC"  , "PAL_60"  ,
+       "SECAM_B"  , "SECAM_D" , "SECAM_G" ,
+       "SECAM_K"  , "SECAM_K1", "SECAM_L" ,
+       "SECAM_60"
+};
+
+#define TV_FORMAT_NUM  (sizeof(tv_format_names) / sizeof(*tv_format_names))
+
+struct psb_intel_sdvo {
+       struct psb_intel_encoder base;
+
+       struct i2c_adapter *i2c;
+       u8 slave_addr;
+
+       struct i2c_adapter ddc;
+
+       /* Register for the SDVO device: SDVOB or SDVOC */
+       int sdvo_reg;
+
+       /* Active outputs controlled by this SDVO output */
+       uint16_t controlled_output;
+
+       /*
+        * Capabilities of the SDVO device returned by
+        * i830_sdvo_get_capabilities()
+        */
+       struct psb_intel_sdvo_caps caps;
+
+       /* Pixel clock limitations reported by the SDVO device, in kHz */
+       int pixel_clock_min, pixel_clock_max;
+
+       /*
+        * For a multifunction SDVO device, this tracks the currently
+        * attached outputs.
+        */
+       uint16_t attached_output;
+
+       /**
+        * This is used to select the color range of RGB outputs in HDMI mode.
+        * It is only valid when using TMDS encoding and 8 bit per color mode.
+        */
+       uint32_t color_range;
+
+       /**
+        * This is set if we're going to treat the device as TV-out.
+        *
+        * While we have these nice friendly flags for output types that ought
+        * to decide this for us, the S-Video output on our HDMI+S-Video card
+        * shows up as RGB1 (VGA).
+        */
+       bool is_tv;
+
+       /* Index of the current TV format name */
+       int tv_format_index;
+
+       /**
+        * This is set if we treat the device as HDMI, instead of DVI.
+        */
+       bool is_hdmi;
+       bool has_hdmi_monitor;
+       bool has_hdmi_audio;
+
+       /**
+        * This is set if the SDVO device's output is detected as LVDS and
+        * we have a valid fixed mode to use with the panel.
+        */
+       bool is_lvds;
+
+       /**
+        * This is the SDVO fixed panel mode pointer
+        */
+       struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+       /* DDC bus used by this SDVO encoder */
+       uint8_t ddc_bus;
+
+       /* Input timings for adjusted_mode */
+       struct psb_intel_sdvo_dtd input_dtd;
+};
+
+struct psb_intel_sdvo_connector {
+       struct psb_intel_connector base;
+
+       /* Mark the type of connector */
+       uint16_t output_flag;
+
+       int force_audio;
+
+       /* This contains all currently supported TV formats */
+       u8 tv_format_supported[TV_FORMAT_NUM];
+       int   format_supported_num;
+       struct drm_property *tv_format;
+
+       /* add the property for the SDVO-TV */
+       struct drm_property *left;
+       struct drm_property *right;
+       struct drm_property *top;
+       struct drm_property *bottom;
+       struct drm_property *hpos;
+       struct drm_property *vpos;
+       struct drm_property *contrast;
+       struct drm_property *saturation;
+       struct drm_property *hue;
+       struct drm_property *sharpness;
+       struct drm_property *flicker_filter;
+       struct drm_property *flicker_filter_adaptive;
+       struct drm_property *flicker_filter_2d;
+       struct drm_property *tv_chroma_filter;
+       struct drm_property *tv_luma_filter;
+       struct drm_property *dot_crawl;
+
+       /* add the property for the SDVO-TV/LVDS */
+       struct drm_property *brightness;
+
+       /* Variables recording the current settings of the above properties */
+       u32     left_margin, right_margin, top_margin, bottom_margin;
+
+       /* Range limits for the margins. */
+       u32     max_hscan,  max_vscan;
+       u32     max_hpos, cur_hpos;
+       u32     max_vpos, cur_vpos;
+       u32     cur_brightness, max_brightness;
+       u32     cur_contrast,   max_contrast;
+       u32     cur_saturation, max_saturation;
+       u32     cur_hue,        max_hue;
+       u32     cur_sharpness,  max_sharpness;
+       u32     cur_flicker_filter,             max_flicker_filter;
+       u32     cur_flicker_filter_adaptive,    max_flicker_filter_adaptive;
+       u32     cur_flicker_filter_2d,          max_flicker_filter_2d;
+       u32     cur_tv_chroma_filter,   max_tv_chroma_filter;
+       u32     cur_tv_luma_filter,     max_tv_luma_filter;
+       u32     cur_dot_crawl,  max_dot_crawl;
+};
+
+static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
+{
+       return container_of(encoder, struct psb_intel_sdvo, base.base);
+}
+
+static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+{
+       return container_of(psb_intel_attached_encoder(connector),
+                           struct psb_intel_sdvo, base);
+}
+
+static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
+{
+       return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base);
+}
+
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags);
+static bool
+psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+                             struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+                             int type);
+static bool
+psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+                                  struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u32 val)
+{
+       struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+       u32 bval = val, cval = val;
+       int i;
+
+       if (psb_intel_sdvo->sdvo_reg == SDVOB) {
+               cval = REG_READ(SDVOC);
+       } else {
+               bval = REG_READ(SDVOB);
+       }
+       /*
+        * Write the registers twice for luck. Sometimes,
+        * writing them only once doesn't appear to 'stick'.
+        * The BIOS does this too. Yay, magic
+        */
+       for (i = 0; i < 2; i++) {
+               REG_WRITE(SDVOB, bval);
+               REG_READ(SDVOB);
+               REG_WRITE(SDVOC, cval);
+               REG_READ(SDVOC);
+       }
+}
+
+static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 addr, u8 *ch)
+{
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = psb_intel_sdvo->slave_addr,
+                       .flags = 0,
+                       .len = 1,
+                       .buf = &addr,
+               },
+               {
+                       .addr = psb_intel_sdvo->slave_addr,
+                       .flags = I2C_M_RD,
+                       .len = 1,
+                       .buf = ch,
+               }
+       };
+       int ret;
+
+       if ((ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, 2)) == 2)
+               return true;
+
+       DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+       return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+       u8 cmd;
+       const char *name;
+} sdvo_cmd_names[] = {
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+    /* Add the op code for SDVO enhancements */
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+    /* HDMI op code */
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+};
+
+#define IS_SDVOB(reg)  (reg == SDVOB)
+#define SDVO_NAME(sdvo) (IS_SDVOB((sdvo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+
+static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+                                  const void *args, int args_len)
+{
+       int i;
+
+       DRM_DEBUG_KMS("%s: W: %02X ",
+                               SDVO_NAME(psb_intel_sdvo), cmd);
+       for (i = 0; i < args_len; i++)
+               DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
+       for (; i < 8; i++)
+               DRM_LOG_KMS("   ");
+       for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+               if (cmd == sdvo_cmd_names[i].cmd) {
+                       DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+                       break;
+               }
+       }
+       if (i == ARRAY_SIZE(sdvo_cmd_names))
+               DRM_LOG_KMS("(%02X)", cmd);
+       DRM_LOG_KMS("\n");
+}
+
+static const char *cmd_status_names[] = {
+       "Power on",
+       "Success",
+       "Not supported",
+       "Invalid arg",
+       "Pending",
+       "Target not specified",
+       "Scaling not supported"
+};
+
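+/*
+ * Note added for clarity (not in the original source): each argument byte
+ * is sent as a register write to SDVO_I2C_ARG_0 - i, the opcode is then
+ * written to SDVO_I2C_OPCODE, and the final two messages select and read
+ * back SDVO_I2C_CMD_STATUS, all issued in a single i2c_transfer() call.
+ */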
+static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+                                const void *args, int args_len)
+{
+       u8 buf[args_len*2 + 2], status;
+       struct i2c_msg msgs[args_len + 3];
+       int i, ret;
+
+       psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
+
+       for (i = 0; i < args_len; i++) {
+               msgs[i].addr = psb_intel_sdvo->slave_addr;
+               msgs[i].flags = 0;
+               msgs[i].len = 2;
+               msgs[i].buf = buf + 2 * i;
+               buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+               buf[2*i + 1] = ((u8 *)args)[i];
+       }
+       msgs[i].addr = psb_intel_sdvo->slave_addr;
+       msgs[i].flags = 0;
+       msgs[i].len = 2;
+       msgs[i].buf = buf + 2*i;
+       buf[2*i + 0] = SDVO_I2C_OPCODE;
+       buf[2*i + 1] = cmd;
+
+       /* the following two are to read the response */
+       status = SDVO_I2C_CMD_STATUS;
+       msgs[i+1].addr = psb_intel_sdvo->slave_addr;
+       msgs[i+1].flags = 0;
+       msgs[i+1].len = 1;
+       msgs[i+1].buf = &status;
+
+       msgs[i+2].addr = psb_intel_sdvo->slave_addr;
+       msgs[i+2].flags = I2C_M_RD;
+       msgs[i+2].len = 1;
+       msgs[i+2].buf = &status;
+
+       ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, i+3);
+       if (ret < 0) {
+               DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+               return false;
+       }
+       if (ret != i+3) {
+               /* failure in I2C transfer */
+               DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+               return false;
+       }
+
+       return true;
+}
+
+static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
+                                    void *response, int response_len)
+{
+       u8 retry = 5;
+       u8 status;
+       int i;
+
+       DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(psb_intel_sdvo));
+
+       /*
+        * The documentation states that all commands will be
+        * processed within 15µs, and that we need only poll
+        * the status byte a maximum of 3 times in order for the
+        * command to be complete.
+        *
+        * Check 5 times in case the hardware failed to read the docs.
+        */
+       if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+                                 SDVO_I2C_CMD_STATUS,
+                                 &status))
+               goto log_fail;
+
+       while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+               udelay(15);
+               if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+                                         SDVO_I2C_CMD_STATUS,
+                                         &status))
+                       goto log_fail;
+       }
+
+       if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+               DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+       else
+               DRM_LOG_KMS("(??? %d)", status);
+
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               goto log_fail;
+
+       /* Read the command response */
+       for (i = 0; i < response_len; i++) {
+               if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+                                         SDVO_I2C_RETURN_0 + i,
+                                         &((u8 *)response)[i]))
+                       goto log_fail;
+               DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+       }
+       DRM_LOG_KMS("\n");
+       return true;
+
+log_fail:
+       DRM_LOG_KMS("... failed\n");
+       return false;
+}
+
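+/*
+ * Note added for clarity (not in the original source): mode->clock is in
+ * kHz, so clocks of 100 MHz and above run at 1x, 50-100 MHz at 2x and
+ * anything slower at 4x; e.g. a 65 MHz panel clock gets a 2x multiplier.
+ */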
+static int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+       if (mode->clock >= 100000)
+               return 1;
+       else if (mode->clock >= 50000)
+               return 2;
+       else
+               return 4;
+}
+
+static bool psb_intel_sdvo_set_control_bus_switch(struct psb_intel_sdvo *psb_intel_sdvo,
+                                             u8 ddc_bus)
+{
+       /* This must be the immediately preceding write before the i2c xfer */
+       return psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+                                   SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+                                   &ddc_bus, 1);
+}
+
+static bool psb_intel_sdvo_set_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, const void *data, int len)
+{
+       if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, data, len))
+               return false;
+
+       return psb_intel_sdvo_read_response(psb_intel_sdvo, NULL, 0);
+}
+
+static bool
+psb_intel_sdvo_get_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, void *value, int len)
+{
+       if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, NULL, 0))
+               return false;
+
+       return psb_intel_sdvo_read_response(psb_intel_sdvo, value, len);
+}
+
+static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+       struct psb_intel_sdvo_set_target_input_args targets = {0};
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_SET_TARGET_INPUT,
+                                   &targets, sizeof(targets));
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_sdvo *psb_intel_sdvo, bool *input_1, bool *input_2)
+{
+       struct psb_intel_sdvo_get_trained_inputs_response response;
+
+       BUILD_BUG_ON(sizeof(response) != 1);
+       if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+                                 &response, sizeof(response)))
+               return false;
+
+       *input_1 = response.input0_trained;
+       *input_2 = response.input1_trained;
+       return true;
+}
+
+static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_sdvo *psb_intel_sdvo,
+                                         u16 outputs)
+{
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_SET_ACTIVE_OUTPUTS,
+                                   &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_sdvo *psb_intel_sdvo,
+                                              int mode)
+{
+       u8 state = SDVO_ENCODER_STATE_ON;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               state = SDVO_ENCODER_STATE_ON;
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+               state = SDVO_ENCODER_STATE_STANDBY;
+               break;
+       case DRM_MODE_DPMS_SUSPEND:
+               state = SDVO_ENCODER_STATE_SUSPEND;
+               break;
+       case DRM_MODE_DPMS_OFF:
+               state = SDVO_ENCODER_STATE_OFF;
+               break;
+       }
+
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_sdvo *psb_intel_sdvo,
+                                                  int *clock_min,
+                                                  int *clock_max)
+{
+       struct psb_intel_sdvo_pixel_clock_range clocks;
+
+       BUILD_BUG_ON(sizeof(clocks) != 4);
+       if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                 SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+                                 &clocks, sizeof(clocks)))
+               return false;
+
+       /* Convert the values from units of 10 kHz to kHz. */
+       *clock_min = clocks.min * 10;
+       *clock_max = clocks.max * 10;
+       return true;
+}
+
+static bool psb_intel_sdvo_set_target_output(struct psb_intel_sdvo *psb_intel_sdvo,
+                                        u16 outputs)
+{
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_SET_TARGET_OUTPUT,
+                                   &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_timing(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+                                 struct psb_intel_sdvo_dtd *dtd)
+{
+       return psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+               psb_intel_sdvo_set_value(psb_intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+                                        struct psb_intel_sdvo_dtd *dtd)
+{
+       return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+                                    SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool psb_intel_sdvo_set_output_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+                                        struct psb_intel_sdvo_dtd *dtd)
+{
+       return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+                                    SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool
+psb_intel_sdvo_create_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+                                        uint16_t clock,
+                                        uint16_t width,
+                                        uint16_t height)
+{
+       struct psb_intel_sdvo_preferred_input_timing_args args;
+
+       memset(&args, 0, sizeof(args));
+       args.clock = clock;
+       args.width = width;
+       args.height = height;
+       args.interlace = 0;
+
+       if (psb_intel_sdvo->is_lvds &&
+          (psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+           psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+               args.scaled = 1;
+
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+                                   &args, sizeof(args));
+}
+
+static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+                                                 struct psb_intel_sdvo_dtd *dtd)
+{
+       BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+       BUILD_BUG_ON(sizeof(dtd->part2) != 8);
+       return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+                                   &dtd->part1, sizeof(dtd->part1)) &&
+               psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+                                    &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_sdvo *psb_intel_sdvo, u8 val)
+{
+       return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
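+/*
+ * Note added for clarity (not in the original source): each 12-bit timing
+ * value is packed as a low byte plus a high nibble, e.g. a 1920-wide mode
+ * stores h_active = 1920 & 0xff = 0x80 and places (1920 >> 8) & 0xf = 7 in
+ * the upper nibble of h_high.
+ */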
+static void psb_intel_sdvo_get_dtd_from_mode(struct psb_intel_sdvo_dtd *dtd,
+                                        const struct drm_display_mode *mode)
+{
+       uint16_t width, height;
+       uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+       uint16_t h_sync_offset, v_sync_offset;
+
+       width = mode->crtc_hdisplay;
+       height = mode->crtc_vdisplay;
+
+       /* do some mode translations */
+       h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+       h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+       v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+       v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+       h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+       v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
+       dtd->part1.clock = mode->clock / 10;
+       dtd->part1.h_active = width & 0xff;
+       dtd->part1.h_blank = h_blank_len & 0xff;
+       dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+               ((h_blank_len >> 8) & 0xf);
+       dtd->part1.v_active = height & 0xff;
+       dtd->part1.v_blank = v_blank_len & 0xff;
+       dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+               ((v_blank_len >> 8) & 0xf);
+
+       dtd->part2.h_sync_off = h_sync_offset & 0xff;
+       dtd->part2.h_sync_width = h_sync_len & 0xff;
+       dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+               (v_sync_len & 0xf);
+       dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+               ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+               ((v_sync_len & 0x30) >> 4);
+
+       dtd->part2.dtd_flags = 0x18;
+       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+               dtd->part2.dtd_flags |= 0x2;
+       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+               dtd->part2.dtd_flags |= 0x4;
+
+       dtd->part2.sdvo_flags = 0;
+       dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+       dtd->part2.reserved = 0;
+}
+
+static void psb_intel_sdvo_get_mode_from_dtd(struct drm_display_mode *mode,
+                                        const struct psb_intel_sdvo_dtd *dtd)
+{
+       mode->hdisplay = dtd->part1.h_active;
+       mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+       mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
+       mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+       mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
+       mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+       mode->htotal = mode->hdisplay + dtd->part1.h_blank;
+       mode->htotal += (dtd->part1.h_high & 0xf) << 8;
+
+       mode->vdisplay = dtd->part1.v_active;
+       mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+       mode->vsync_start = mode->vdisplay;
+       mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+       mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+       mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+       mode->vsync_end = mode->vsync_start +
+               (dtd->part2.v_sync_off_width & 0xf);
+       mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+       mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
+       mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+       mode->clock = dtd->part1.clock * 10;
+
+       mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+       if (dtd->part2.dtd_flags & 0x2)
+               mode->flags |= DRM_MODE_FLAG_PHSYNC;
+       if (dtd->part2.dtd_flags & 0x4)
+               mode->flags |= DRM_MODE_FLAG_PVSYNC;
+}
+
+static bool psb_intel_sdvo_check_supp_encode(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+       struct psb_intel_sdvo_encode encode;
+
+       BUILD_BUG_ON(sizeof(encode) != 2);
+       return psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                 SDVO_CMD_GET_SUPP_ENCODE,
+                                 &encode, sizeof(encode));
+}
+
+static bool psb_intel_sdvo_set_encode(struct psb_intel_sdvo *psb_intel_sdvo,
+                                 uint8_t mode)
+{
+       return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+static bool psb_intel_sdvo_set_colorimetry(struct psb_intel_sdvo *psb_intel_sdvo,
+                                      uint8_t mode)
+{
+       return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+}
+
+#if 0
+static void psb_intel_sdvo_dump_hdmi_buf(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+       int i, j;
+       uint8_t set_buf_index[2];
+       uint8_t av_split;
+       uint8_t buf_size;
+       uint8_t buf[48];
+       uint8_t *pos;
+
+       psb_intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+
+       for (i = 0; i <= av_split; i++) {
+               set_buf_index[0] = i; set_buf_index[1] = 0;
+               psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
+                                    set_buf_index, 2);
+               psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+               psb_intel_sdvo_read_response(encoder, &buf_size, 1);
+
+               pos = buf;
+               for (j = 0; j <= buf_size; j += 8) {
+                       psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
+                                            NULL, 0);
+                       psb_intel_sdvo_read_response(encoder, pos, 8);
+                       pos += 8;
+               }
+       }
+}
+#endif
+
+static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+       DRM_INFO("HDMI is not supported yet");
+
+       return false;
+#if 0
+       struct dip_infoframe avi_if = {
+               .type = DIP_TYPE_AVI,
+               .ver = DIP_VERSION_AVI,
+               .len = DIP_LEN_AVI,
+       };
+       uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+       uint8_t set_buf_index[2] = { 1, 0 };
+       uint64_t *data = (uint64_t *)&avi_if;
+       unsigned i;
+
+       intel_dip_infoframe_csum(&avi_if);
+
+       if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                 SDVO_CMD_SET_HBUF_INDEX,
+                                 set_buf_index, 2))
+               return false;
+
+       for (i = 0; i < sizeof(avi_if); i += 8) {
+               if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                         SDVO_CMD_SET_HBUF_DATA,
+                                         data, 8))
+                       return false;
+               data++;
+       }
+
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_SET_HBUF_TXRATE,
+                                   &tx_rate, 1);
+#endif
+}
+
+static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+       struct psb_intel_sdvo_tv_format format;
+       uint32_t format_map;
+
+       format_map = 1 << psb_intel_sdvo->tv_format_index;
+       memset(&format, 0, sizeof(format));
+       memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+       BUILD_BUG_ON(sizeof(format) != 6);
+       return psb_intel_sdvo_set_value(psb_intel_sdvo,
+                                   SDVO_CMD_SET_TV_FORMAT,
+                                   &format, sizeof(format));
+}
+
+static bool
+psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+                                       struct drm_display_mode *mode)
+{
+       struct psb_intel_sdvo_dtd output_dtd;
+
+       if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+                                         psb_intel_sdvo->attached_output))
+               return false;
+
+       psb_intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+       if (!psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &output_dtd))
+               return false;
+
+       return true;
+}
+
+static bool
+psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+                                       struct drm_display_mode *mode,
+                                       struct drm_display_mode *adjusted_mode)
+{
+       /* Reset the input timing to the screen. Assume always input 0. */
+       if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+               return false;
+
+       if (!psb_intel_sdvo_create_preferred_input_timing(psb_intel_sdvo,
+                                                     mode->clock / 10,
+                                                     mode->hdisplay,
+                                                     mode->vdisplay))
+               return false;
+
+       if (!psb_intel_sdvo_get_preferred_input_timing(psb_intel_sdvo,
+                                                  &psb_intel_sdvo->input_dtd))
+               return false;
+
+       psb_intel_sdvo_get_mode_from_dtd(adjusted_mode, &psb_intel_sdvo->input_dtd);
+
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
+       return true;
+}
+
+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+       int multiplier;
+
+       /* We need to construct preferred input timings based on our
+        * output timings.  To do that, we have to set the output
+        * timings, even though this isn't really the right place in
+        * the sequence to do it. Oh well.
+        */
+       if (psb_intel_sdvo->is_tv) {
+               if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo, mode))
+                       return false;
+
+               (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+                                                            mode,
+                                                            adjusted_mode);
+       } else if (psb_intel_sdvo->is_lvds) {
+               if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo,
+                                                            psb_intel_sdvo->sdvo_lvds_fixed_mode))
+                       return false;
+
+               (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+                                                            mode,
+                                                            adjusted_mode);
+       }
+
+       /* Make the CRTC code factor in the SDVO pixel multiplier.  The
+        * SDVO device will factor out the multiplier during mode_set.
+        */
+       multiplier = psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
+       psb_intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+
+       return true;
+}
+
+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+       struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+       u32 sdvox;
+       struct psb_intel_sdvo_in_out_map in_out;
+       struct psb_intel_sdvo_dtd input_dtd;
+       int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
+       int rate;
+
+       if (!mode)
+               return;
+
+       /* First, set the input mapping for the first input to our controlled
+        * output. This is only correct if we're a single-input device, in
+        * which case the first input is the output from the appropriate SDVO
+        * channel on the motherboard.  In a two-input device, the first input
+        * will be SDVOB and the second SDVOC.
+        */
+       in_out.in0 = psb_intel_sdvo->attached_output;
+       in_out.in1 = 0;
+
+       psb_intel_sdvo_set_value(psb_intel_sdvo,
+                            SDVO_CMD_SET_IN_OUT_MAP,
+                            &in_out, sizeof(in_out));
+
+       /* Set the output timings to the screen */
+       if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+                                         psb_intel_sdvo->attached_output))
+               return;
+
+       /* mode_fixup() has already tried to fetch the input timing and
+        * fill it into adjusted_mode.
+        */
+       if (psb_intel_sdvo->is_tv || psb_intel_sdvo->is_lvds) {
+               input_dtd = psb_intel_sdvo->input_dtd;
+       } else {
+               /* Set the output timing to the screen */
+               if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+                                                 psb_intel_sdvo->attached_output))
+                       return;
+
+               psb_intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+               (void) psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &input_dtd);
+       }
+
+       /* Set the input timing to the screen. Assume always input 0. */
+       if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+               return;
+
+       if (psb_intel_sdvo->has_hdmi_monitor) {
+               psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_HDMI);
+               psb_intel_sdvo_set_colorimetry(psb_intel_sdvo,
+                                          SDVO_COLORIMETRY_RGB256);
+               psb_intel_sdvo_set_avi_infoframe(psb_intel_sdvo);
+       } else
+               psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_DVI);
+
+       if (psb_intel_sdvo->is_tv &&
+           !psb_intel_sdvo_set_tv_format(psb_intel_sdvo))
+               return;
+
+       (void) psb_intel_sdvo_set_input_timing(psb_intel_sdvo, &input_dtd);
+
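+       /* Translate the pixel multiplier into the SDVO clock rate
+        * multiplier command value.
+        */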
+       switch (pixel_multiplier) {
+       default:
+       case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+       case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+       case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+       }
+       if (!psb_intel_sdvo_set_clock_rate_mult(psb_intel_sdvo, rate))
+               return;
+
+       /* Set the SDVO control regs. */
+       sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+       switch (psb_intel_sdvo->sdvo_reg) {
+       case SDVOB:
+               sdvox &= SDVOB_PRESERVE_MASK;
+               break;
+       case SDVOC:
+               sdvox &= SDVOC_PRESERVE_MASK;
+               break;
+       }
+       sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+
+       if (psb_intel_crtc->pipe == 1)
+               sdvox |= SDVO_PIPE_B_SELECT;
+       if (psb_intel_sdvo->has_hdmi_audio)
+               sdvox |= SDVO_AUDIO_ENABLE;
+
+       /* FIXME: Check if this is needed for PSB
+       sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+       */
+
+       if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+               sdvox |= SDVO_STALL_SELECT;
+       psb_intel_sdvo_write_sdvox(psb_intel_sdvo, sdvox);
+}
+
+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+       u32 temp;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               DRM_DEBUG("DPMS_ON");
+               break;
+       case DRM_MODE_DPMS_OFF:
+               DRM_DEBUG("DPMS_OFF");
+               break;
+       default:
+               DRM_DEBUG("DPMS: %d", mode);
+       }
+
+       if (mode != DRM_MODE_DPMS_ON) {
+               psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, 0);
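+               /* Note: the encoder power-state call below is compiled out. */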
+               if (0)
+                       psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+
+               if (mode == DRM_MODE_DPMS_OFF) {
+                       temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+                       if ((temp & SDVO_ENABLE) != 0) {
+                               psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
+                       }
+               }
+       } else {
+               bool input1, input2;
+               int i;
+               u8 status;
+
+               temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+               if ((temp & SDVO_ENABLE) == 0)
+                       psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
+               for (i = 0; i < 2; i++)
+                       psb_intel_wait_for_vblank(dev);
+
+               status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
+               /* Warn if the device reported failure to sync.
+                * A lot of SDVO devices fail to notify of sync, but if the
+                * command status is a success we assume the device synced
+                * anyway.
+                */
+               if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+                       DRM_DEBUG_KMS("First %s output reported failure to "
+                                       "sync\n", SDVO_NAME(psb_intel_sdvo));
+               }
+
+               if (0)
+                       psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+               psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, psb_intel_sdvo->attached_output);
+       }
+}
+
+static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       if (psb_intel_sdvo->pixel_clock_min > mode->clock)
+               return MODE_CLOCK_LOW;
+
+       if (psb_intel_sdvo->pixel_clock_max < mode->clock)
+               return MODE_CLOCK_HIGH;
+
+       if (psb_intel_sdvo->is_lvds) {
+               if (mode->hdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+                       return MODE_PANEL;
+
+               if (mode->vdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+                       return MODE_PANEL;
+       }
+
+       return MODE_OK;
+}
+
+static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdvo, struct psb_intel_sdvo_caps *caps)
+{
+       BUILD_BUG_ON(sizeof(*caps) != 8);
+       if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                 SDVO_CMD_GET_DEVICE_CAPS,
+                                 caps, sizeof(*caps)))
+               return false;
+
+       DRM_DEBUG_KMS("SDVO capabilities:\n"
+                     "  vendor_id: %d\n"
+                     "  device_id: %d\n"
+                     "  device_rev_id: %d\n"
+                     "  sdvo_version_major: %d\n"
+                     "  sdvo_version_minor: %d\n"
+                     "  sdvo_inputs_mask: %d\n"
+                     "  smooth_scaling: %d\n"
+                     "  sharp_scaling: %d\n"
+                     "  up_scaling: %d\n"
+                     "  down_scaling: %d\n"
+                     "  stall_support: %d\n"
+                     "  output_flags: %d\n",
+                     caps->vendor_id,
+                     caps->device_id,
+                     caps->device_rev_id,
+                     caps->sdvo_version_major,
+                     caps->sdvo_version_minor,
+                     caps->sdvo_inputs_mask,
+                     caps->smooth_scaling,
+                     caps->sharp_scaling,
+                     caps->up_scaling,
+                     caps->down_scaling,
+                     caps->stall_support,
+                     caps->output_flags);
+
+       return true;
+}
+
+/* Currently unused. */
+#if 0
+struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
+{
+       struct drm_connector *connector = NULL;
+       struct psb_intel_sdvo *iout = NULL;
+       struct psb_intel_sdvo *sdvo;
+
+       /* find the sdvo connector */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               iout = to_psb_intel_sdvo(connector);
+
+               if (iout->type != INTEL_OUTPUT_SDVO)
+                       continue;
+
+               sdvo = iout->dev_priv;
+
+               if (sdvo->sdvo_reg == SDVOB && sdvoB)
+                       return connector;
+
+               if (sdvo->sdvo_reg == SDVOC && !sdvoB)
+                       return connector;
+
+       }
+
+       return NULL;
+}
+
+int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
+{
+       u8 response[2];
+       u8 status;
+       struct psb_intel_sdvo *psb_intel_sdvo;
+       DRM_DEBUG_KMS("\n");
+
+       if (!connector)
+               return 0;
+
+       psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+       return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+                                   &response, 2) && response[0];
+}
+
+void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+{
+       u8 response[2];
+       u8 status;
+       struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+       psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+       psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+       if (on) {
+               psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+               status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+               psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+       } else {
+               response[0] = 0;
+               response[1] = 0;
+               psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+       }
+
+       psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+       psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+}
+#endif
+
+static bool
+psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+       /* Is there more than one type of output? */
+       int caps = psb_intel_sdvo->caps.output_flags & 0xf;
+
+       /* More than one low bit set means more than one output type. */
+       return (caps & (caps - 1)) != 0;
+}
+
+static struct edid *
+psb_intel_sdvo_get_edid(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
+       return drm_get_edid(connector, &sdvo->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+       struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+       return drm_get_edid(connector,
+                           &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+}
+
+enum drm_connector_status
+psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+       enum drm_connector_status status;
+       struct edid *edid;
+
+       edid = psb_intel_sdvo_get_edid(connector);
+
+       if (edid == NULL && psb_intel_sdvo_multifunc_encoder(psb_intel_sdvo)) {
+               u8 ddc, saved_ddc = psb_intel_sdvo->ddc_bus;
+
+               /*
+                * Don't use DDC bus 1 to fetch the EDID; it is used for
+                * the SDVO SPD ROM. Walk the remaining bus selections
+                * downward until one of them returns an EDID.
+                */
+               for (ddc = psb_intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+                       psb_intel_sdvo->ddc_bus = ddc;
+                       edid = psb_intel_sdvo_get_edid(connector);
+                       if (edid)
+                               break;
+               }
+               /*
+                * If an EDID was found on another bus, keep that as the
+                * DDC bus; otherwise restore the original one.
+                */
+               if (edid == NULL)
+                       psb_intel_sdvo->ddc_bus = saved_ddc;
+       }
+
+       /*
+        * When there is no EDID and no monitor is connected to the VGA
+        * port, try the CRT DDC to read the EDID for the DVI connector.
+        */
+       if (edid == NULL)
+               edid = psb_intel_sdvo_get_analog_edid(connector);
+
+       status = connector_status_unknown;
+       if (edid != NULL) {
+               /* DDC bus is shared, match EDID to connector type */
+               if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+                       status = connector_status_connected;
+                       if (psb_intel_sdvo->is_hdmi) {
+                               psb_intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+                               psb_intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+                       }
+               } else
+                       status = connector_status_disconnected;
+               connector->display_info.raw_edid = NULL;
+               kfree(edid);
+       }
+
+       if (status == connector_status_connected) {
+               struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+               if (psb_intel_sdvo_connector->force_audio)
+                       psb_intel_sdvo->has_hdmi_audio = psb_intel_sdvo_connector->force_audio > 0;
+       }
+
+       return status;
+}
+
+static enum drm_connector_status
+psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+       uint16_t response;
+       struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+       enum drm_connector_status ret;
+
+       if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+                                 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+               return connector_status_unknown;
+
+       /* add 30ms delay when the output type might be TV */
+       if (psb_intel_sdvo->caps.output_flags &
+           (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+               mdelay(30);
+
+       if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2))
+               return connector_status_unknown;
+
+       DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+                     response & 0xff, response >> 8,
+                     psb_intel_sdvo_connector->output_flag);
+
+       if (response == 0)
+               return connector_status_disconnected;
+
+       psb_intel_sdvo->attached_output = response;
+
+       psb_intel_sdvo->has_hdmi_monitor = false;
+       psb_intel_sdvo->has_hdmi_audio = false;
+
+       if ((psb_intel_sdvo_connector->output_flag & response) == 0)
+               ret = connector_status_disconnected;
+       else if (IS_TMDS(psb_intel_sdvo_connector))
+               ret = psb_intel_sdvo_hdmi_sink_detect(connector);
+       else {
+               struct edid *edid;
+
+               /* If we have an EDID, check that it matches the connection type. */
+               edid = psb_intel_sdvo_get_edid(connector);
+               if (edid == NULL)
+                       edid = psb_intel_sdvo_get_analog_edid(connector);
+               if (edid != NULL) {
+                       if (edid->input & DRM_EDID_INPUT_DIGITAL)
+                               ret = connector_status_disconnected;
+                       else
+                               ret = connector_status_connected;
+                       connector->display_info.raw_edid = NULL;
+                       kfree(edid);
+               } else
+                       ret = connector_status_connected;
+       }
+
+       /* May need to update encoder flags, e.g. the TV clock for SDVO TV, LVDS, etc. */
+       if (ret == connector_status_connected) {
+               psb_intel_sdvo->is_tv = false;
+               psb_intel_sdvo->is_lvds = false;
+               psb_intel_sdvo->base.needs_tv_clock = false;
+
+               if (response & SDVO_TV_MASK) {
+                       psb_intel_sdvo->is_tv = true;
+                       psb_intel_sdvo->base.needs_tv_clock = true;
+               }
+               if (response & SDVO_LVDS_MASK)
+                       psb_intel_sdvo->is_lvds = psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+       }
+
+       return ret;
+}
+
+static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+       struct edid *edid;
+
+       /* set the bus switch and get the modes */
+       edid = psb_intel_sdvo_get_edid(connector);
+
+       /*
+        * Mac mini hack.  On this device, the DVI-I connector shares one DDC
+        * link between analog and digital outputs. So, if the regular SDVO
+        * DDC fails, check to see if the analog output is disconnected, in
+        * which case we'll look there for the digital DDC data.
+        */
+       if (edid == NULL)
+               edid = psb_intel_sdvo_get_analog_edid(connector);
+
+       if (edid != NULL) {
+               struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+               bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+               bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector);
+
+               if (connector_is_digital == monitor_is_digital) {
+                       drm_mode_connector_update_edid_property(connector, edid);
+                       drm_add_edid_modes(connector, edid);
+               }
+
+               connector->display_info.raw_edid = NULL;
+               kfree(edid);
+       }
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note!  This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+       { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+                  416, 0, 200, 201, 232, 233, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+                  416, 0, 240, 241, 272, 273, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+                  496, 0, 300, 301, 332, 333, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+                  736, 0, 350, 351, 382, 383, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+                  736, 0, 400, 401, 432, 433, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+                  736, 0, 480, 481, 512, 513, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+                  800, 0, 480, 481, 512, 513, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+                  800, 0, 576, 577, 608, 609, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+                  816, 0, 350, 351, 382, 383, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+                  816, 0, 400, 401, 432, 433, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+                  816, 0, 480, 481, 512, 513, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+                  816, 0, 540, 541, 572, 573, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+                  816, 0, 576, 577, 608, 609, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+                  864, 0, 576, 577, 608, 609, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+                  896, 0, 600, 601, 632, 633, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+                  928, 0, 624, 625, 656, 657, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+                  1016, 0, 766, 767, 798, 799, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+                  1120, 0, 768, 769, 800, 801, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+       { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+                  1376, 0, 1024, 1025, 1056, 1057, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+       struct psb_intel_sdvo_sdtv_resolution_request tv_res;
+       uint32_t reply = 0, format_map = 0;
+       int i;
+
+       /* Read the list of supported input resolutions for the selected TV
+        * format.
+        */
+       format_map = 1 << psb_intel_sdvo->tv_format_index;
+       memcpy(&tv_res, &format_map,
+              min(sizeof(format_map), sizeof(struct psb_intel_sdvo_sdtv_resolution_request)));
+
+       if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, psb_intel_sdvo->attached_output))
+               return;
+
+       BUILD_BUG_ON(sizeof(tv_res) != 3);
+       if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+                                 SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+                                 &tv_res, sizeof(tv_res)))
+               return;
+       if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &reply, 3))
+               return;
+
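+       /* Each set bit in the reply selects the matching entry in
+        * sdvo_tv_modes[].
+        */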
+       for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+               if (reply & (1 << i)) {
+                       struct drm_display_mode *nmode;
+                       nmode = drm_mode_duplicate(connector->dev,
+                                                  &sdvo_tv_modes[i]);
+                       if (nmode)
+                               drm_mode_probed_add(connector, nmode);
+               }
+}
+
+static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+       struct drm_psb_private *dev_priv = connector->dev->dev_private;
+       struct drm_display_mode *newmode;
+
+       /*
+        * Attempt to get the mode list from DDC.
+        * Assume that the preferred modes are
+        * arranged in priority order.
+        */
+       psb_intel_ddc_get_modes(connector, psb_intel_sdvo->i2c);
+       if (!list_empty(&connector->probed_modes))
+               goto end;
+
+       /* Fetch modes from VBT */
+       if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+               newmode = drm_mode_duplicate(connector->dev,
+                                            dev_priv->sdvo_lvds_vbt_mode);
+               if (newmode != NULL) {
+                       /* Guarantee the mode is preferred */
+                       newmode->type = (DRM_MODE_TYPE_PREFERRED |
+                                        DRM_MODE_TYPE_DRIVER);
+                       drm_mode_probed_add(connector, newmode);
+               }
+       }
+
+end:
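+       /* Remember the preferred mode; mode_fixup() and mode_valid() treat
+        * it as the fixed LVDS panel mode.
+        */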
+       list_for_each_entry(newmode, &connector->probed_modes, head) {
+               if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+                       psb_intel_sdvo->sdvo_lvds_fixed_mode =
+                               drm_mode_duplicate(connector->dev, newmode);
+
+                       drm_mode_set_crtcinfo(psb_intel_sdvo->sdvo_lvds_fixed_mode,
+                                             0);
+
+                       psb_intel_sdvo->is_lvds = true;
+                       break;
+               }
+       }
+}
+
+static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+       if (IS_TV(psb_intel_sdvo_connector))
+               psb_intel_sdvo_get_tv_modes(connector);
+       else if (IS_LVDS(psb_intel_sdvo_connector))
+               psb_intel_sdvo_get_lvds_modes(connector);
+       else
+               psb_intel_sdvo_get_ddc_modes(connector);
+
+       return !list_empty(&connector->probed_modes);
+}
+
+static void
+psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+       struct drm_device *dev = connector->dev;
+
+       if (psb_intel_sdvo_connector->left)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->left);
+       if (psb_intel_sdvo_connector->right)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->right);
+       if (psb_intel_sdvo_connector->top)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->top);
+       if (psb_intel_sdvo_connector->bottom)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->bottom);
+       if (psb_intel_sdvo_connector->hpos)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->hpos);
+       if (psb_intel_sdvo_connector->vpos)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->vpos);
+       if (psb_intel_sdvo_connector->saturation)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->saturation);
+       if (psb_intel_sdvo_connector->contrast)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->contrast);
+       if (psb_intel_sdvo_connector->hue)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->hue);
+       if (psb_intel_sdvo_connector->sharpness)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness);
+       if (psb_intel_sdvo_connector->flicker_filter)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter);
+       if (psb_intel_sdvo_connector->flicker_filter_2d)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d);
+       if (psb_intel_sdvo_connector->flicker_filter_adaptive)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive);
+       if (psb_intel_sdvo_connector->tv_luma_filter)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter);
+       if (psb_intel_sdvo_connector->tv_chroma_filter)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter);
+       if (psb_intel_sdvo_connector->dot_crawl)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl);
+       if (psb_intel_sdvo_connector->brightness)
+               drm_property_destroy(dev, psb_intel_sdvo_connector->brightness);
+}
+
+static void psb_intel_sdvo_destroy(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+       if (psb_intel_sdvo_connector->tv_format)
+               drm_property_destroy(connector->dev,
+                                    psb_intel_sdvo_connector->tv_format);
+
+       psb_intel_sdvo_destroy_enhance_property(connector);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+static bool psb_intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+       struct edid *edid;
+       bool has_audio = false;
+
+       if (!psb_intel_sdvo->is_hdmi)
+               return false;
+
+       edid = psb_intel_sdvo_get_edid(connector);
+       if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+               has_audio = drm_detect_monitor_audio(edid);
+
+       return has_audio;
+}
+
+static int
+psb_intel_sdvo_set_property(struct drm_connector *connector,
+                       struct drm_property *property,
+                       uint64_t val)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+       struct drm_psb_private *dev_priv = connector->dev->dev_private;
+       uint16_t temp_value;
+       uint8_t cmd;
+       int ret;
+
+       ret = drm_connector_property_set_value(connector, property, val);
+       if (ret)
+               return ret;
+
+       if (property == dev_priv->force_audio_property) {
+               int i = val;
+               bool has_audio;
+
+               if (i == psb_intel_sdvo_connector->force_audio)
+                       return 0;
+
+               psb_intel_sdvo_connector->force_audio = i;
+
+               if (i == 0)
+                       has_audio = psb_intel_sdvo_detect_hdmi_audio(connector);
+               else
+                       has_audio = i > 0;
+
+               if (has_audio == psb_intel_sdvo->has_hdmi_audio)
+                       return 0;
+
+               psb_intel_sdvo->has_hdmi_audio = has_audio;
+               goto done;
+       }
+
+       if (property == dev_priv->broadcast_rgb_property) {
+               if (val == !!psb_intel_sdvo->color_range)
+                       return 0;
+
+               psb_intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+               goto done;
+       }
+
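+/* Map a changed connector property onto its SDVO SET command, skipping
+ * no-op updates and rejecting values above the cached maximum.
+ */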
+#define CHECK_PROPERTY(name, NAME) \
+       if (psb_intel_sdvo_connector->name == property) { \
+               if (psb_intel_sdvo_connector->cur_##name == temp_value) return 0; \
+               if (psb_intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+               cmd = SDVO_CMD_SET_##NAME; \
+               psb_intel_sdvo_connector->cur_##name = temp_value; \
+               goto set_value; \
+       }
+
+       if (property == psb_intel_sdvo_connector->tv_format) {
+               if (val >= TV_FORMAT_NUM)
+                       return -EINVAL;
+
+               if (psb_intel_sdvo->tv_format_index ==
+                   psb_intel_sdvo_connector->tv_format_supported[val])
+                       return 0;
+
+               psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[val];
+               goto done;
+       } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
+               temp_value = val;
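+               /* Margins are kept symmetric: setting one edge also updates
+                * the opposite edge, and the overscan command is given the
+                * remaining scan range.
+                */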
+               if (psb_intel_sdvo_connector->left == property) {
+                       drm_connector_property_set_value(connector,
+                                                        psb_intel_sdvo_connector->right, val);
+                       if (psb_intel_sdvo_connector->left_margin == temp_value)
+                               return 0;
+
+                       psb_intel_sdvo_connector->left_margin = temp_value;
+                       psb_intel_sdvo_connector->right_margin = temp_value;
+                       temp_value = psb_intel_sdvo_connector->max_hscan -
+                               psb_intel_sdvo_connector->left_margin;
+                       cmd = SDVO_CMD_SET_OVERSCAN_H;
+                       goto set_value;
+               } else if (psb_intel_sdvo_connector->right == property) {
+                       drm_connector_property_set_value(connector,
+                                                        psb_intel_sdvo_connector->left, val);
+                       if (psb_intel_sdvo_connector->right_margin == temp_value)
+                               return 0;
+
+                       psb_intel_sdvo_connector->left_margin = temp_value;
+                       psb_intel_sdvo_connector->right_margin = temp_value;
+                       temp_value = psb_intel_sdvo_connector->max_hscan -
+                               psb_intel_sdvo_connector->left_margin;
+                       cmd = SDVO_CMD_SET_OVERSCAN_H;
+                       goto set_value;
+               } else if (psb_intel_sdvo_connector->top == property) {
+                       drm_connector_property_set_value(connector,
+                                                        psb_intel_sdvo_connector->bottom, val);
+                       if (psb_intel_sdvo_connector->top_margin == temp_value)
+                               return 0;
+
+                       psb_intel_sdvo_connector->top_margin = temp_value;
+                       psb_intel_sdvo_connector->bottom_margin = temp_value;
+                       temp_value = psb_intel_sdvo_connector->max_vscan -
+                               psb_intel_sdvo_connector->top_margin;
+                       cmd = SDVO_CMD_SET_OVERSCAN_V;
+                       goto set_value;
+               } else if (psb_intel_sdvo_connector->bottom == property) {
+                       drm_connector_property_set_value(connector,
+                                                        psb_intel_sdvo_connector->top, val);
+                       if (psb_intel_sdvo_connector->bottom_margin == temp_value)
+                               return 0;
+
+                       psb_intel_sdvo_connector->top_margin = temp_value;
+                       psb_intel_sdvo_connector->bottom_margin = temp_value;
+                       temp_value = psb_intel_sdvo_connector->max_vscan -
+                               psb_intel_sdvo_connector->top_margin;
+                       cmd = SDVO_CMD_SET_OVERSCAN_V;
+                       goto set_value;
+               }
+               CHECK_PROPERTY(hpos, HPOS)
+               CHECK_PROPERTY(vpos, VPOS)
+               CHECK_PROPERTY(saturation, SATURATION)
+               CHECK_PROPERTY(contrast, CONTRAST)
+               CHECK_PROPERTY(hue, HUE)
+               CHECK_PROPERTY(brightness, BRIGHTNESS)
+               CHECK_PROPERTY(sharpness, SHARPNESS)
+               CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+               CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+               CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+               CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+               CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+               CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
+       }
+
+       return -EINVAL; /* unknown property */
+
+set_value:
+       if (!psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &temp_value, 2))
+               return -EIO;
+
+done:
+       if (psb_intel_sdvo->base.base.crtc) {
+               struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
+               drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
+                                        crtc->y, crtc->fb);
+       }
+
+       return 0;
+#undef CHECK_PROPERTY
+}
+
+static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
+       .dpms = psb_intel_sdvo_dpms,
+       .mode_fixup = psb_intel_sdvo_mode_fixup,
+       .prepare = psb_intel_encoder_prepare,
+       .mode_set = psb_intel_sdvo_mode_set,
+       .commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = psb_intel_sdvo_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = psb_intel_sdvo_set_property,
+       .destroy = psb_intel_sdvo_destroy,
+};
+
+static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
+       .get_modes = psb_intel_sdvo_get_modes,
+       .mode_valid = psb_intel_sdvo_mode_valid,
+       .best_encoder = psb_intel_best_encoder,
+};
+
+static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+       struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+
+       if (psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+               drm_mode_destroy(encoder->dev,
+                                psb_intel_sdvo->sdvo_lvds_fixed_mode);
+
+       i2c_del_adapter(&psb_intel_sdvo->ddc);
+       psb_intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
+       .destroy = psb_intel_sdvo_enc_destroy,
+};
+
+static void
+psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
+{
+       /* FIXME: At the moment, ddc_bus = 2 is the only thing that works.
+        * We need to figure out whether this is true for all available
+        * Poulsbo hardware, or whether we need to fiddle with the disabled
+        * guessing code below. The problem might go away if we can parse
+        * the SDVO mappings from the BIOS.
+        */
+       sdvo->ddc_bus = 2;
+
+#if 0
+       uint16_t mask = 0;
+       unsigned int num_bits;
+
+       /* Make a mask of outputs less than or equal to our own priority in the
+        * list.
+        */
+       switch (sdvo->controlled_output) {
+       case SDVO_OUTPUT_LVDS1:
+               mask |= SDVO_OUTPUT_LVDS1;
+       case SDVO_OUTPUT_LVDS0:
+               mask |= SDVO_OUTPUT_LVDS0;
+       case SDVO_OUTPUT_TMDS1:
+               mask |= SDVO_OUTPUT_TMDS1;
+       case SDVO_OUTPUT_TMDS0:
+               mask |= SDVO_OUTPUT_TMDS0;
+       case SDVO_OUTPUT_RGB1:
+               mask |= SDVO_OUTPUT_RGB1;
+       case SDVO_OUTPUT_RGB0:
+               mask |= SDVO_OUTPUT_RGB0;
+               break;
+       }
+
+       /* Count bits to find what number we are in the priority list. */
+       mask &= sdvo->caps.output_flags;
+       num_bits = hweight16(mask);
+       /* If more than 3 outputs, default to DDC bus 3 for now. */
+       if (num_bits > 3)
+               num_bits = 3;
+
+       /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+       sdvo->ddc_bus = 1 << num_bits;
+#endif
+}
+
+/**
+ * Choose the appropriate DDC bus for control bus switch command for this
+ * SDVO output based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+psb_intel_sdvo_select_ddc_bus(struct drm_psb_private *dev_priv,
+                         struct psb_intel_sdvo *sdvo, u32 reg)
+{
+       struct sdvo_device_mapping *mapping;
+
+       if (IS_SDVOB(reg))
+               mapping = &(dev_priv->sdvo_mappings[0]);
+       else
+               mapping = &(dev_priv->sdvo_mappings[1]);
+
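+       /* The high nibble of ddc_pin encodes the DDC bus as a bit index. */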
+       if (mapping->initialized)
+               sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+       else
+               psb_intel_sdvo_guess_ddc_bus(sdvo);
+}
+
+static void
+psb_intel_sdvo_select_i2c_bus(struct drm_psb_private *dev_priv,
+                         struct psb_intel_sdvo *sdvo, u32 reg)
+{
+       struct sdvo_device_mapping *mapping;
+       u8 pin, speed;
+
+       if (IS_SDVOB(reg))
+               mapping = &dev_priv->sdvo_mappings[0];
+       else
+               mapping = &dev_priv->sdvo_mappings[1];
+
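+       /* Default to GMBUS port DPB at 1MHz unless the BIOS mapping
+        * provides a pin and speed.
+        */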
+       pin = GMBUS_PORT_DPB;
+       speed = GMBUS_RATE_1MHZ >> 8;
+       if (mapping->initialized) {
+               pin = mapping->i2c_pin;
+               speed = mapping->i2c_speed;
+       }
+
+       if (pin < GMBUS_NUM_PORTS) {
+               sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+               gma_intel_gmbus_set_speed(sdvo->i2c, speed);
+               gma_intel_gmbus_force_bit(sdvo->i2c, true);
+       } else
+               sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+}
+
+static bool
+psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+       return psb_intel_sdvo_check_supp_encode(psb_intel_sdvo);
+}
+
+static u8
+psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+       if (IS_SDVOB(sdvo_reg)) {
+               my_mapping = &dev_priv->sdvo_mappings[0];
+               other_mapping = &dev_priv->sdvo_mappings[1];
+       } else {
+               my_mapping = &dev_priv->sdvo_mappings[1];
+               other_mapping = &dev_priv->sdvo_mappings[0];
+       }
+
+       /* If the BIOS described our SDVO device, take advantage of it. */
+       if (my_mapping->slave_addr)
+               return my_mapping->slave_addr;
+
+       /* If the BIOS only described a different SDVO device, use the
+        * address that it isn't using.
+        */
+       if (other_mapping->slave_addr) {
+               if (other_mapping->slave_addr == 0x70)
+                       return 0x72;
+               else
+                       return 0x70;
+       }
+
+       /* No SDVO device info was found for either DVO port, so fall back
+        * to the mapping assumption used before BIOS parsing.
+        */
+       if (IS_SDVOB(sdvo_reg))
+               return 0x70;
+       else
+               return 0x72;
+}
+
+static void
+psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
+                         struct psb_intel_sdvo *encoder)
+{
+       drm_connector_init(encoder->base.base.dev,
+                          &connector->base.base,
+                          &psb_intel_sdvo_connector_funcs,
+                          connector->base.base.connector_type);
+
+       drm_connector_helper_add(&connector->base.base,
+                                &psb_intel_sdvo_connector_helper_funcs);
+
+       connector->base.base.interlace_allowed = 0;
+       connector->base.base.doublescan_allowed = 0;
+       connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+
+       psb_intel_connector_attach_encoder(&connector->base, &encoder->base);
+       drm_sysfs_connector_add(&connector->base.base);
+}
+
+static void
+psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
+{
+       /* FIXME: We don't support HDMI at the moment
+       struct drm_device *dev = connector->base.base.dev;
+
+       intel_attach_force_audio_property(&connector->base.base);
+       if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+               intel_attach_broadcast_rgb_property(&connector->base.base);
+       */
+}
+
+static bool
+psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+       struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+       struct drm_connector *connector;
+       struct psb_intel_connector *intel_connector;
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+       psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+       if (!psb_intel_sdvo_connector)
+               return false;
+
+       if (device == 0) {
+               psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+               psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+       } else if (device == 1) {
+               psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+               psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+       }
+
+       intel_connector = &psb_intel_sdvo_connector->base;
+       connector = &intel_connector->base;
+       // connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+       encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+       connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+       if (psb_intel_sdvo_is_hdmi_connector(psb_intel_sdvo, device)) {
+               connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+               psb_intel_sdvo->is_hdmi = true;
+       }
+       psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+                                      (1 << INTEL_ANALOG_CLONE_BIT));
+
+       psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+       if (psb_intel_sdvo->is_hdmi)
+               psb_intel_sdvo_add_hdmi_properties(psb_intel_sdvo_connector);
+
+       return true;
+}
+
+static bool
+psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
+{
+       struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+       struct drm_connector *connector;
+       struct psb_intel_connector *intel_connector;
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+       psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+       if (!psb_intel_sdvo_connector)
+               return false;
+
+       intel_connector = &psb_intel_sdvo_connector->base;
+       connector = &intel_connector->base;
+       encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+       connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+       psb_intel_sdvo->controlled_output |= type;
+       psb_intel_sdvo_connector->output_flag = type;
+
+       psb_intel_sdvo->is_tv = true;
+       psb_intel_sdvo->base.needs_tv_clock = true;
+       psb_intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+       psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+
+       if (!psb_intel_sdvo_tv_create_property(psb_intel_sdvo, psb_intel_sdvo_connector, type))
+               goto err;
+
+       if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+               goto err;
+
+       return true;
+
+err:
+       psb_intel_sdvo_destroy(connector);
+       return false;
+}
+
+static bool
+psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+       struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+       struct drm_connector *connector;
+       struct psb_intel_connector *intel_connector;
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+       psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+       if (!psb_intel_sdvo_connector)
+               return false;
+
+       intel_connector = &psb_intel_sdvo_connector->base;
+       connector = &intel_connector->base;
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+       encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+       connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+       if (device == 0) {
+               psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+               psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+       } else if (device == 1) {
+               psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+               psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+       }
+
+       psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+                                      (1 << INTEL_ANALOG_CLONE_BIT));
+
+       psb_intel_sdvo_connector_init(psb_intel_sdvo_connector,
+                                 psb_intel_sdvo);
+       return true;
+}
+
+static bool
+psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+       struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+       struct drm_connector *connector;
+       struct psb_intel_connector *intel_connector;
+       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+       psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+       if (!psb_intel_sdvo_connector)
+               return false;
+
+       intel_connector = &psb_intel_sdvo_connector->base;
+       connector = &intel_connector->base;
+       encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+       connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+       if (device == 0) {
+               psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+               psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+       } else if (device == 1) {
+               psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+               psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+       }
+
+       psb_intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+                                      (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+
+       psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+       if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+               goto err;
+
+       return true;
+
+err:
+       psb_intel_sdvo_destroy(connector);
+       return false;
+}
+
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags)
+{
+       psb_intel_sdvo->is_tv = false;
+       psb_intel_sdvo->base.needs_tv_clock = false;
+       psb_intel_sdvo->is_lvds = false;
+
+       /* SDVO requires that an XXX1 function block may not exist unless the
+        * corresponding XXX0 block does.
+        */
+
+       if (flags & SDVO_OUTPUT_TMDS0)
+               if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 0))
+                       return false;
+
+       if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+               if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 1))
+                       return false;
+
+       /* TV has no XXX1 function block */
+       if (flags & SDVO_OUTPUT_SVID0)
+               if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_SVID0))
+                       return false;
+
+       if (flags & SDVO_OUTPUT_CVBS0)
+               if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_CVBS0))
+                       return false;
+
+       if (flags & SDVO_OUTPUT_RGB0)
+               if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 0))
+                       return false;
+
+       if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+               if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 1))
+                       return false;
+
+       if (flags & SDVO_OUTPUT_LVDS0)
+               if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 0))
+                       return false;
+
+       if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+               if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 1))
+                       return false;
+
+       if ((flags & SDVO_OUTPUT_MASK) == 0) {
+               unsigned char bytes[2];
+
+               psb_intel_sdvo->controlled_output = 0;
+               memcpy(bytes, &psb_intel_sdvo->caps.output_flags, 2);
+               DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+                             SDVO_NAME(psb_intel_sdvo),
+                             bytes[0], bytes[1]);
+               return false;
+       }
+       psb_intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
+
+       return true;
+}
+
+static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+                                         struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+                                         int type)
+{
+       struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+       struct psb_intel_sdvo_tv_format format;
+       uint32_t format_map, i;
+
+       if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, type))
+               return false;
+
+       BUILD_BUG_ON(sizeof(format) != 6);
+       if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                 SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+                                 &format, sizeof(format)))
+               return false;
+
+       memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+       if (format_map == 0)
+               return false;
+
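+       /* Collect the set bits of the supported-format bitmask into
+        * tv_format_supported[].
+        */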
+       psb_intel_sdvo_connector->format_supported_num = 0;
+       for (i = 0; i < TV_FORMAT_NUM; i++)
+               if (format_map & (1 << i))
+                       psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
+
+       psb_intel_sdvo_connector->tv_format =
+                       drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                           "mode", psb_intel_sdvo_connector->format_supported_num);
+       if (!psb_intel_sdvo_connector->tv_format)
+               return false;
+
+       for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
+               drm_property_add_enum(
+                               psb_intel_sdvo_connector->tv_format, i,
+                               i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
+
+       psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
+       drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+                                     psb_intel_sdvo_connector->tv_format, 0);
+       return true;
+}
+
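+/* Query the maximum and current value of the named SDVO enhancement and
+ * expose it to userspace as a DRM range property on the connector.
+ */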
+#define ENHANCEMENT(name, NAME) do { \
+       if (enhancements.name) { \
+               if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+                   !psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+                       return false; \
+               psb_intel_sdvo_connector->max_##name = data_value[0]; \
+               psb_intel_sdvo_connector->cur_##name = response; \
+               psb_intel_sdvo_connector->name = \
+                       drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
+               if (!psb_intel_sdvo_connector->name) return false; \
+               psb_intel_sdvo_connector->name->values[0] = 0; \
+               psb_intel_sdvo_connector->name->values[1] = data_value[0]; \
+               drm_connector_attach_property(connector, \
+                                             psb_intel_sdvo_connector->name, \
+                                             psb_intel_sdvo_connector->cur_##name); \
+               DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+                             data_value[0], data_value[1], response); \
+       } \
+} while (0)
+
+static bool
+psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
+                                     struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+                                     struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+       struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+       struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+       uint16_t response, data_value[2];
+
+       /* When horizontal overscan is supported, add the left/right margin properties. */
+       if (enhancements.overscan_h) {
+               if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                         SDVO_CMD_GET_MAX_OVERSCAN_H,
+                                         &data_value, 4))
+                       return false;
+
+               if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                         SDVO_CMD_GET_OVERSCAN_H,
+                                         &response, 2))
+                       return false;
+
+               psb_intel_sdvo_connector->max_hscan = data_value[0];
+               psb_intel_sdvo_connector->left_margin = data_value[0] - response;
+               psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin;
+               psb_intel_sdvo_connector->left =
+                       drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                           "left_margin", 2);
+               if (!psb_intel_sdvo_connector->left)
+                       return false;
+
+               psb_intel_sdvo_connector->left->values[0] = 0;
+               psb_intel_sdvo_connector->left->values[1] = data_value[0];
+               drm_connector_attach_property(connector,
+                                             psb_intel_sdvo_connector->left,
+                                             psb_intel_sdvo_connector->left_margin);
+
+               psb_intel_sdvo_connector->right =
+                       drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                           "right_margin", 2);
+               if (!psb_intel_sdvo_connector->right)
+                       return false;
+
+               psb_intel_sdvo_connector->right->values[0] = 0;
+               psb_intel_sdvo_connector->right->values[1] = data_value[0];
+               drm_connector_attach_property(connector,
+                                             psb_intel_sdvo_connector->right,
+                                             psb_intel_sdvo_connector->right_margin);
+               DRM_DEBUG_KMS("h_overscan: max %d, "
+                             "default %d, current %d\n",
+                             data_value[0], data_value[1], response);
+       }
+
+       if (enhancements.overscan_v) {
+               if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                         SDVO_CMD_GET_MAX_OVERSCAN_V,
+                                         &data_value, 4))
+                       return false;
+
+               if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+                                         SDVO_CMD_GET_OVERSCAN_V,
+                                         &response, 2))
+                       return false;
+
+               psb_intel_sdvo_connector->max_vscan = data_value[0];
+               psb_intel_sdvo_connector->top_margin = data_value[0] - response;
+               psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin;
+               psb_intel_sdvo_connector->top =
+                       drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                           "top_margin", 2);
+               if (!psb_intel_sdvo_connector->top)
+                       return false;
+
+               psb_intel_sdvo_connector->top->values[0] = 0;
+               psb_intel_sdvo_connector->top->values[1] = data_value[0];
+               drm_connector_attach_property(connector,
+                                             psb_intel_sdvo_connector->top,
+                                             psb_intel_sdvo_connector->top_margin);
+
+               psb_intel_sdvo_connector->bottom =
+                       drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                           "bottom_margin", 2);
+               if (!psb_intel_sdvo_connector->bottom)
+                       return false;
+
+               psb_intel_sdvo_connector->bottom->values[0] = 0;
+               psb_intel_sdvo_connector->bottom->values[1] = data_value[0];
+               drm_connector_attach_property(connector,
+                                             psb_intel_sdvo_connector->bottom,
+                                             psb_intel_sdvo_connector->bottom_margin);
+               DRM_DEBUG_KMS("v_overscan: max %d, "
+                             "default %d, current %d\n",
+                             data_value[0], data_value[1], response);
+       }
+
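+       /* For each enhancement the encoder reports, create a DRM range
+        * property spanning 0..max and attach it to the connector with the
+        * current value read back from the device. */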
+       ENHANCEMENT(hpos, HPOS);
+       ENHANCEMENT(vpos, VPOS);
+       ENHANCEMENT(saturation, SATURATION);
+       ENHANCEMENT(contrast, CONTRAST);
+       ENHANCEMENT(hue, HUE);
+       ENHANCEMENT(sharpness, SHARPNESS);
+       ENHANCEMENT(brightness, BRIGHTNESS);
+       ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+       ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+       ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+       ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+       ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+       if (enhancements.dot_crawl) {
+               if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+                       return false;
+
+               psb_intel_sdvo_connector->max_dot_crawl = 1;
+               psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+               psb_intel_sdvo_connector->dot_crawl =
+                       drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
+               if (!psb_intel_sdvo_connector->dot_crawl)
+                       return false;
+
+               psb_intel_sdvo_connector->dot_crawl->values[0] = 0;
+               psb_intel_sdvo_connector->dot_crawl->values[1] = 1;
+               drm_connector_attach_property(connector,
+                                             psb_intel_sdvo_connector->dot_crawl,
+                                             psb_intel_sdvo_connector->cur_dot_crawl);
+               DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+       }
+
+       return true;
+}
+
+static bool
+psb_intel_sdvo_create_enhance_property_lvds(struct psb_intel_sdvo *psb_intel_sdvo,
+                                       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+                                       struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+       struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+       struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+       uint16_t response, data_value[2];
+
+       ENHANCEMENT(brightness, BRIGHTNESS);
+
+       return true;
+}
+#undef ENHANCEMENT
+
+static bool psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+                                              struct psb_intel_sdvo_connector *psb_intel_sdvo_connector)
+{
+       union {
+               struct psb_intel_sdvo_enhancements_reply reply;
+               uint16_t response;
+       } enhancements;
+
+       BUILD_BUG_ON(sizeof(enhancements) != 2);
+
+       enhancements.response = 0;
+       psb_intel_sdvo_get_value(psb_intel_sdvo,
+                            SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+                            &enhancements, sizeof(enhancements));
+       if (enhancements.response == 0) {
+               DRM_DEBUG_KMS("No enhancement is supported\n");
+               return true;
+       }
+
+       if (IS_TV(psb_intel_sdvo_connector))
+               return psb_intel_sdvo_create_enhance_property_tv(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+       else if (IS_LVDS(psb_intel_sdvo_connector))
+               return psb_intel_sdvo_create_enhance_property_lvds(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+       else
+               return true;
+}
+
+static int psb_intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+                                    struct i2c_msg *msgs,
+                                    int num)
+{
+       struct psb_intel_sdvo *sdvo = adapter->algo_data;
+
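+       /* Select the requested DDC bus on the SDVO device, then forward the
+        * transfer to the underlying i2c adapter. */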
+       if (!psb_intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+               return -EIO;
+
+       return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 psb_intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+       struct psb_intel_sdvo *sdvo = adapter->algo_data;
+       return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm psb_intel_sdvo_ddc_proxy = {
+       .master_xfer    = psb_intel_sdvo_ddc_proxy_xfer,
+       .functionality  = psb_intel_sdvo_ddc_proxy_func
+};
+
+static bool
+psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
+                         struct drm_device *dev)
+{
+       sdvo->ddc.owner = THIS_MODULE;
+       sdvo->ddc.class = I2C_CLASS_DDC;
+       snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+       sdvo->ddc.dev.parent = &dev->pdev->dev;
+       sdvo->ddc.algo_data = sdvo;
+       sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
+
+       return i2c_add_adapter(&sdvo->ddc) == 0;
+}
+
+bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct psb_intel_encoder *psb_intel_encoder;
+       struct psb_intel_sdvo *psb_intel_sdvo;
+       int i;
+
+       psb_intel_sdvo = kzalloc(sizeof(struct psb_intel_sdvo), GFP_KERNEL);
+       if (!psb_intel_sdvo)
+               return false;
+
+       psb_intel_sdvo->sdvo_reg = sdvo_reg;
+       psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+       psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+       if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
+               kfree(psb_intel_sdvo);
+               return false;
+       }
+
+       /* encoder type will be decided later */
+       psb_intel_encoder = &psb_intel_sdvo->base;
+       psb_intel_encoder->type = INTEL_OUTPUT_SDVO;
+       drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
+
+       /* Read the regs to test if we can talk to the device */
+       for (i = 0; i < 0x40; i++) {
+               u8 byte;
+
+               if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, i, &byte)) {
+                       DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
+                                     IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+                       goto err;
+               }
+       }
+
+       if (IS_SDVOB(sdvo_reg))
+               dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
+       else
+               dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+
+       drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs);
+
+       /* In the default case, SDVO LVDS is false */
+       if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
+               goto err;
+
+       if (psb_intel_sdvo_output_setup(psb_intel_sdvo,
+                                   psb_intel_sdvo->caps.output_flags) != true) {
+               DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+                             IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+               goto err;
+       }
+
+       psb_intel_sdvo_select_ddc_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+
+       /* Set the input timing for the screen. Always assume input 0. */
+       if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+               goto err;
+
+       if (!psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_sdvo,
+                                                   &psb_intel_sdvo->pixel_clock_min,
+                                                   &psb_intel_sdvo->pixel_clock_max))
+               goto err;
+
+       DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+                       "clock range %dMHz - %dMHz, "
+                       "input 1: %c, input 2: %c, "
+                       "output 1: %c, output 2: %c\n",
+                       SDVO_NAME(psb_intel_sdvo),
+                       psb_intel_sdvo->caps.vendor_id, psb_intel_sdvo->caps.device_id,
+                       psb_intel_sdvo->caps.device_rev_id,
+                       psb_intel_sdvo->pixel_clock_min / 1000,
+                       psb_intel_sdvo->pixel_clock_max / 1000,
+                       (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+                       (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+                       /* check currently supported outputs */
+                       psb_intel_sdvo->caps.output_flags &
+                       (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+                       psb_intel_sdvo->caps.output_flags &
+                       (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+       return true;
+
+err:
+       drm_encoder_cleanup(&psb_intel_encoder->base);
+       i2c_del_adapter(&psb_intel_sdvo->ddc);
+       kfree(psb_intel_sdvo);
+
+       return false;
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
new file mode 100644 (file)
index 0000000..600e797
--- /dev/null
@@ -0,0 +1,723 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST   (0)
+#define SDVO_OUTPUT_TMDS0   (1 << 0)
+#define SDVO_OUTPUT_RGB0    (1 << 1)
+#define SDVO_OUTPUT_CVBS0   (1 << 2)
+#define SDVO_OUTPUT_SVID0   (1 << 3)
+#define SDVO_OUTPUT_YPRPB0  (1 << 4)
+#define SDVO_OUTPUT_SCART0  (1 << 5)
+#define SDVO_OUTPUT_LVDS0   (1 << 6)
+#define SDVO_OUTPUT_TMDS1   (1 << 8)
+#define SDVO_OUTPUT_RGB1    (1 << 9)
+#define SDVO_OUTPUT_CVBS1   (1 << 10)
+#define SDVO_OUTPUT_SVID1   (1 << 11)
+#define SDVO_OUTPUT_YPRPB1  (1 << 12)
+#define SDVO_OUTPUT_SCART1  (1 << 13)
+#define SDVO_OUTPUT_LVDS1   (1 << 14)
+#define SDVO_OUTPUT_LAST    (14)
+
+struct psb_intel_sdvo_caps {
+    u8 vendor_id;
+    u8 device_id;
+    u8 device_rev_id;
+    u8 sdvo_version_major;
+    u8 sdvo_version_minor;
+    unsigned int sdvo_inputs_mask:2;
+    unsigned int smooth_scaling:1;
+    unsigned int sharp_scaling:1;
+    unsigned int up_scaling:1;
+    unsigned int down_scaling:1;
+    unsigned int stall_support:1;
+    unsigned int pad:1;
+    u16 output_flags;
+} __attribute__((packed));
+
+/** This matches the EDID DTD structure, more or less */
+struct psb_intel_sdvo_dtd {
+    struct {
+       u16 clock;              /**< pixel clock, in 10kHz units */
+       u8 h_active;            /**< lower 8 bits (pixels) */
+       u8 h_blank;             /**< lower 8 bits (pixels) */
+       u8 h_high;              /**< upper 4 bits each h_active, h_blank */
+       u8 v_active;            /**< lower 8 bits (lines) */
+       u8 v_blank;             /**< lower 8 bits (lines) */
+       u8 v_high;              /**< upper 4 bits each v_active, v_blank */
+    } part1;
+
+    struct {
+       u8 h_sync_off;  /**< lower 8 bits, from hblank start */
+       u8 h_sync_width;        /**< lower 8 bits (pixels) */
+       /** lower 4 bits each vsync offset, vsync width */
+       u8 v_sync_off_width;
+       /**
+        * 2 high bits of hsync offset, 2 high bits of hsync width,
+        * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+        */
+       u8 sync_off_width_high;
+       u8 dtd_flags;
+       u8 sdvo_flags;
+       /** bits 6-7 of the vsync offset, stored at bits 6-7 of this byte */
+       u8 v_sync_off_high;
+       u8 reserved;
+    } part2;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_pixel_clock_range {
+    u16 min;                   /**< pixel clock, in 10kHz units */
+    u16 max;                   /**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+
+struct psb_intel_sdvo_preferred_input_timing_args {
+    u16 clock;
+    u16 width;
+    u16 height;
+    u8 interlace:1;
+    u8 scaled:1;
+    u8 pad:6;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0                         0x07
+#define SDVO_I2C_ARG_1                         0x06
+#define SDVO_I2C_ARG_2                         0x05
+#define SDVO_I2C_ARG_3                         0x04
+#define SDVO_I2C_ARG_4                         0x03
+#define SDVO_I2C_ARG_5                         0x02
+#define SDVO_I2C_ARG_6                         0x01
+#define SDVO_I2C_ARG_7                         0x00
+#define SDVO_I2C_OPCODE                                0x08
+#define SDVO_I2C_CMD_STATUS                    0x09
+#define SDVO_I2C_RETURN_0                      0x0a
+#define SDVO_I2C_RETURN_1                      0x0b
+#define SDVO_I2C_RETURN_2                      0x0c
+#define SDVO_I2C_RETURN_3                      0x0d
+#define SDVO_I2C_RETURN_4                      0x0e
+#define SDVO_I2C_RETURN_5                      0x0f
+#define SDVO_I2C_RETURN_6                      0x10
+#define SDVO_I2C_RETURN_7                      0x11
+#define SDVO_I2C_VENDOR_BEGIN                  0x20
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON               0x0
+#define SDVO_CMD_STATUS_SUCCESS                        0x1
+#define SDVO_CMD_STATUS_NOTSUPP                        0x2
+#define SDVO_CMD_STATUS_INVALID_ARG            0x3
+#define SDVO_CMD_STATUS_PENDING                        0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED   0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP       0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET                                 0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS                       0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV                      0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR                    SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR                    SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH                    SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS                    0x03
+struct psb_intel_sdvo_get_trained_inputs_response {
+    unsigned int input0_trained:1;
+    unsigned int input1_trained:1;
+    unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS                    0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags.  Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS                    0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP                                0x06
+struct psb_intel_sdvo_in_out_map {
+    u16 in0, in1;
+};
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP                                0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS                 0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT                  0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG                   0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG                   0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE            0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+    u16 interrupt_status;
+    unsigned int ambient_light_interrupt:1;
+    unsigned int hdmi_audio_encrypt_change:1;
+    unsigned int pad:6;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT                      0x10
+struct psb_intel_sdvo_set_target_input_args {
+    unsigned int target_1:1;
+    unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT                     0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1               0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2               0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1               0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2               0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1              0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2              0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1              0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2              0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW                            SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH                           SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE                             SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK                              SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH                               SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE                             SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK                              SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH                               SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF                            SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH                          SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH                      SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH                  SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS                            SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED                          (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK                         (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK                          (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK                           (3 << 1)
+# define SDVO_DTD_SDVO_FLAS                            SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL                              (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED                           (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT                         (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK                       (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE                       (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP                      (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH                     (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH                       SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING         0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW         SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH                SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW         SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH                SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW                SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH       SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS             SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED          (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED              (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1      0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2      0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE           0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE          0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS                0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT                   0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT                   0x21
+# define SDVO_CLOCK_RATE_MULT_1X                               (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X                               (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X                               (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS              0x27
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct psb_intel_sdvo_tv_format {
+    unsigned int ntsc_m:1;
+    unsigned int ntsc_j:1;
+    unsigned int ntsc_443:1;
+    unsigned int pal_b:1;
+    unsigned int pal_d:1;
+    unsigned int pal_g:1;
+    unsigned int pal_h:1;
+    unsigned int pal_i:1;
+
+    unsigned int pal_m:1;
+    unsigned int pal_n:1;
+    unsigned int pal_nc:1;
+    unsigned int pal_60:1;
+    unsigned int secam_b:1;
+    unsigned int secam_d:1;
+    unsigned int secam_g:1;
+    unsigned int secam_k:1;
+
+    unsigned int secam_k1:1;
+    unsigned int secam_l:1;
+    unsigned int secam_60:1;
+    unsigned int hdtv_std_smpte_240m_1080i_59:1;
+    unsigned int hdtv_std_smpte_240m_1080i_60:1;
+    unsigned int hdtv_std_smpte_260m_1080i_59:1;
+    unsigned int hdtv_std_smpte_260m_1080i_60:1;
+    unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+    unsigned int hdtv_std_smpte_274m_1080i_59:1;
+    unsigned int hdtv_std_smpte_274m_1080i_60:1;
+    unsigned int hdtv_std_smpte_274m_1080p_23:1;
+    unsigned int hdtv_std_smpte_274m_1080p_24:1;
+    unsigned int hdtv_std_smpte_274m_1080p_25:1;
+    unsigned int hdtv_std_smpte_274m_1080p_29:1;
+    unsigned int hdtv_std_smpte_274m_1080p_30:1;
+    unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+    unsigned int hdtv_std_smpte_274m_1080p_59:1;
+    unsigned int hdtv_std_smpte_274m_1080p_60:1;
+    unsigned int hdtv_std_smpte_295m_1080i_50:1;
+    unsigned int hdtv_std_smpte_295m_1080p_50:1;
+    unsigned int hdtv_std_smpte_296m_720p_59:1;
+    unsigned int hdtv_std_smpte_296m_720p_60:1;
+    unsigned int hdtv_std_smpte_296m_720p_50:1;
+    unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+    unsigned int hdtv_std_smpte_170m_480i_59:1;
+    unsigned int hdtv_std_iturbt601_576i_50:1;
+    unsigned int hdtv_std_iturbt601_576p_50:1;
+    unsigned int hdtv_std_eia_7702a_480i_60:1;
+    unsigned int hdtv_std_eia_7702a_480p_60:1;
+    unsigned int pad:3;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_TV_FORMAT                         0x28
+
+#define SDVO_CMD_SET_TV_FORMAT                         0x29
+
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT           0x83
+struct psb_intel_sdvo_sdtv_resolution_request {
+    unsigned int ntsc_m:1;
+    unsigned int ntsc_j:1;
+    unsigned int ntsc_443:1;
+    unsigned int pal_b:1;
+    unsigned int pal_d:1;
+    unsigned int pal_g:1;
+    unsigned int pal_h:1;
+    unsigned int pal_i:1;
+
+    unsigned int pal_m:1;
+    unsigned int pal_n:1;
+    unsigned int pal_nc:1;
+    unsigned int pal_60:1;
+    unsigned int secam_b:1;
+    unsigned int secam_d:1;
+    unsigned int secam_g:1;
+    unsigned int secam_k:1;
+
+    unsigned int secam_k1:1;
+    unsigned int secam_l:1;
+    unsigned int secam_60:1;
+    unsigned int pad:5;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_sdtv_resolution_reply {
+    unsigned int res_320x200:1;
+    unsigned int res_320x240:1;
+    unsigned int res_400x300:1;
+    unsigned int res_640x350:1;
+    unsigned int res_640x400:1;
+    unsigned int res_640x480:1;
+    unsigned int res_704x480:1;
+    unsigned int res_704x576:1;
+
+    unsigned int res_720x350:1;
+    unsigned int res_720x400:1;
+    unsigned int res_720x480:1;
+    unsigned int res_720x540:1;
+    unsigned int res_720x576:1;
+    unsigned int res_768x576:1;
+    unsigned int res_800x600:1;
+    unsigned int res_832x624:1;
+
+    unsigned int res_920x766:1;
+    unsigned int res_1024x768:1;
+    unsigned int res_1280x1024:1;
+    unsigned int pad:5;
+} __attribute__((packed));
+
+/* Get the supported resolutions with a square pixel aspect ratio that can be
+   scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT            0x85
+
+struct psb_intel_sdvo_hdtv_resolution_request {
+    unsigned int hdtv_std_smpte_240m_1080i_59:1;
+    unsigned int hdtv_std_smpte_240m_1080i_60:1;
+    unsigned int hdtv_std_smpte_260m_1080i_59:1;
+    unsigned int hdtv_std_smpte_260m_1080i_60:1;
+    unsigned int hdtv_std_smpte_274m_1080i_50:1;
+    unsigned int hdtv_std_smpte_274m_1080i_59:1;
+    unsigned int hdtv_std_smpte_274m_1080i_60:1;
+    unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+    unsigned int hdtv_std_smpte_274m_1080p_24:1;
+    unsigned int hdtv_std_smpte_274m_1080p_25:1;
+    unsigned int hdtv_std_smpte_274m_1080p_29:1;
+    unsigned int hdtv_std_smpte_274m_1080p_30:1;
+    unsigned int hdtv_std_smpte_274m_1080p_50:1;
+    unsigned int hdtv_std_smpte_274m_1080p_59:1;
+    unsigned int hdtv_std_smpte_274m_1080p_60:1;
+    unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+    unsigned int hdtv_std_smpte_295m_1080p_50:1;
+    unsigned int hdtv_std_smpte_296m_720p_59:1;
+    unsigned int hdtv_std_smpte_296m_720p_60:1;
+    unsigned int hdtv_std_smpte_296m_720p_50:1;
+    unsigned int hdtv_std_smpte_293m_480p_59:1;
+    unsigned int hdtv_std_smpte_170m_480i_59:1;
+    unsigned int hdtv_std_iturbt601_576i_50:1;
+    unsigned int hdtv_std_iturbt601_576p_50:1;
+
+    unsigned int hdtv_std_eia_7702a_480i_60:1;
+    unsigned int hdtv_std_eia_7702a_480p_60:1;
+    unsigned int pad:6;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_hdtv_resolution_reply {
+    unsigned int res_640x480:1;
+    unsigned int res_800x600:1;
+    unsigned int res_1024x768:1;
+    unsigned int res_1280x960:1;
+    unsigned int res_1400x1050:1;
+    unsigned int res_1600x1200:1;
+    unsigned int res_1920x1440:1;
+    unsigned int res_2048x1536:1;
+
+    unsigned int res_2560x1920:1;
+    unsigned int res_3200x2400:1;
+    unsigned int res_3840x2880:1;
+    unsigned int pad1:5;
+
+    unsigned int res_848x480:1;
+    unsigned int res_1064x600:1;
+    unsigned int res_1280x720:1;
+    unsigned int res_1360x768:1;
+    unsigned int res_1704x960:1;
+    unsigned int res_1864x1050:1;
+    unsigned int res_1920x1080:1;
+    unsigned int res_2128x1200:1;
+
+    unsigned int res_2560x1400:1;
+    unsigned int res_2728x1536:1;
+    unsigned int res_3408x1920:1;
+    unsigned int res_4264x2400:1;
+    unsigned int res_5120x2880:1;
+    unsigned int pad2:3;
+
+    unsigned int res_768x480:1;
+    unsigned int res_960x600:1;
+    unsigned int res_1152x720:1;
+    unsigned int res_1124x768:1;
+    unsigned int res_1536x960:1;
+    unsigned int res_1680x1050:1;
+    unsigned int res_1728x1080:1;
+    unsigned int res_1920x1200:1;
+
+    unsigned int res_2304x1440:1;
+    unsigned int res_2456x1536:1;
+    unsigned int res_3072x1920:1;
+    unsigned int res_3840x2400:1;
+    unsigned int res_4608x2880:1;
+    unsigned int pad3:3;
+
+    unsigned int res_1280x1024:1;
+    unsigned int pad4:7;
+
+    unsigned int res_1280x768:1;
+    unsigned int pad5:7;
+} __attribute__((packed));
+
+/* Get supported power states; returns info for the encoder and monitor and
+   relies on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES            0x2a
+/* Get power state; returns info for the encoder and monitor and relies on
+   the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE                       0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE               0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE               0x2c
+# define SDVO_ENCODER_STATE_ON                                 (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY                            (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND                            (1 << 2)
+# define SDVO_ENCODER_STATE_OFF                                        (1 << 3)
+# define SDVO_MONITOR_STATE_ON                                 (1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY                            (1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND                            (1 << 6)
+# define SDVO_MONITOR_STATE_OFF                                        (1 << 7)
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING                0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING            0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING            0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+struct psb_sdvo_panel_power_sequencing {
+    u8 t0;
+    u8 t1;
+    u8 t2;
+    u8 t3;
+    u8 t4;
+
+    unsigned int t0_high:2;
+    unsigned int t1_high:2;
+    unsigned int t2_high:2;
+    unsigned int t3_high:2;
+
+    unsigned int t4_high:2;
+    unsigned int pad:6;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL               0x30
+struct sdvo_max_backlight_reply {
+    u8 max_value;
+    u8 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL                   0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL                   0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT                     0x33
+struct sdvo_get_ambient_light_reply {
+    u16 trip_low;
+    u16 trip_high;
+    u16 value;
+} __attribute__((packed));
+#define SDVO_CMD_SET_AMBIENT_LIGHT                     0x34
+struct sdvo_set_ambient_light_reply {
+    u16 trip_low;
+    u16 trip_high;
+    unsigned int enable:1;
+    unsigned int pad:7;
+} __attribute__((packed));
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE               0x7d
+# define SDVO_DISPLAY_STATE_ON                         (1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY                    (1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND                    (1 << 2)
+# define SDVO_DISPLAY_STATE_OFF                                (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS            0x84
+struct psb_intel_sdvo_enhancements_reply {
+    unsigned int flicker_filter:1;
+    unsigned int flicker_filter_adaptive:1;
+    unsigned int flicker_filter_2d:1;
+    unsigned int saturation:1;
+    unsigned int hue:1;
+    unsigned int brightness:1;
+    unsigned int contrast:1;
+    unsigned int overscan_h:1;
+
+    unsigned int overscan_v:1;
+    unsigned int hpos:1;
+    unsigned int vpos:1;
+    unsigned int sharpness:1;
+    unsigned int dot_crawl:1;
+    unsigned int dither:1;
+    unsigned int tv_chroma_filter:1;
+    unsigned int tv_luma_filter:1;
+} __attribute__((packed));
+
+/* Picture enhancement limits below are dependent on the current TV format,
+ * and thus need to be queried and set after the TV format has been chosen.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER                        0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE       0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D             0x52
+#define SDVO_CMD_GET_MAX_SATURATION                    0x55
+#define SDVO_CMD_GET_MAX_HUE                           0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS                    0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST                      0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H                    0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V                    0x64
+#define SDVO_CMD_GET_MAX_HPOS                          0x67
+#define SDVO_CMD_GET_MAX_VPOS                          0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS                     0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER              0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER                        0x77
+struct psb_intel_sdvo_enhancement_limits_reply {
+    u16 max_value;
+    u16 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION            0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION            0x80
+# define SDVO_LVDS_COLOR_DEPTH_18                      (0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24                      (1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG                      (0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI                   (1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL                      (0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL                                (1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER                    0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER                    0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE           0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE           0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D                 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D                 0x54
+#define SDVO_CMD_GET_SATURATION                                0x56
+#define SDVO_CMD_SET_SATURATION                                0x57
+#define SDVO_CMD_GET_HUE                               0x59
+#define SDVO_CMD_SET_HUE                               0x5a
+#define SDVO_CMD_GET_BRIGHTNESS                                0x5c
+#define SDVO_CMD_SET_BRIGHTNESS                                0x5d
+#define SDVO_CMD_GET_CONTRAST                          0x5f
+#define SDVO_CMD_SET_CONTRAST                          0x60
+#define SDVO_CMD_GET_OVERSCAN_H                                0x62
+#define SDVO_CMD_SET_OVERSCAN_H                                0x63
+#define SDVO_CMD_GET_OVERSCAN_V                                0x65
+#define SDVO_CMD_SET_OVERSCAN_V                                0x66
+#define SDVO_CMD_GET_HPOS                              0x68
+#define SDVO_CMD_SET_HPOS                              0x69
+#define SDVO_CMD_GET_VPOS                              0x6b
+#define SDVO_CMD_SET_VPOS                              0x6c
+#define SDVO_CMD_GET_SHARPNESS                         0x6e
+#define SDVO_CMD_SET_SHARPNESS                         0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER                  0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER                  0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER                    0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER                    0x79
+struct psb_intel_sdvo_enhancements_arg {
+    u16 value;
+}__attribute__((packed));
+
+#define SDVO_CMD_GET_DOT_CRAWL                         0x70
+#define SDVO_CMD_SET_DOT_CRAWL                         0x71
+# define SDVO_DOT_CRAWL_ON                                     (1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON                             (1 << 1)
+
+#define SDVO_CMD_GET_DITHER                            0x72
+#define SDVO_CMD_SET_DITHER                            0x73
+# define SDVO_DITHER_ON                                                (1 << 0)
+# define SDVO_DITHER_DEFAULT_ON                                        (1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH                        0x7a
+# define SDVO_CONTROL_BUS_PROM                         (1 << 0)
+# define SDVO_CONTROL_BUS_DDC1                         (1 << 1)
+# define SDVO_CONTROL_BUS_DDC2                         (1 << 2)
+# define SDVO_CONTROL_BUS_DDC3                         (1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE       0x9d
+#define SDVO_CMD_GET_ENCODE            0x9e
+#define SDVO_CMD_SET_ENCODE            0x9f
+  #define SDVO_ENCODE_DVI      0x0
+  #define SDVO_ENCODE_HDMI     0x1
+#define SDVO_CMD_SET_PIXEL_REPLI       0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI       0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP   0x8d
+#define SDVO_CMD_SET_COLORIMETRY       0x8e
+  #define SDVO_COLORIMETRY_RGB256   0x0
+  #define SDVO_COLORIMETRY_RGB220   0x1
+  #define SDVO_COLORIMETRY_YCrCb422 0x3
+  #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY       0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT                0x91
+#define SDVO_CMD_GET_AUDIO_STAT                0x92
+#define SDVO_CMD_SET_HBUF_INDEX                0x93
+#define SDVO_CMD_GET_HBUF_INDEX                0x94
+#define SDVO_CMD_GET_HBUF_INFO         0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT     0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT     0x97
+#define SDVO_CMD_SET_HBUF_DATA         0x98
+#define SDVO_CMD_GET_HBUF_DATA         0x99
+#define SDVO_CMD_SET_HBUF_TXRATE       0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE       0x9b
+  #define SDVO_HBUF_TX_DISABLED        (0 << 6)
+  #define SDVO_HBUF_TX_ONCE    (2 << 6)
+  #define SDVO_HBUF_TX_VSYNC   (3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO     0x9c
+#define SDVO_NEED_TO_STALL  (1 << 7)
+
+struct psb_intel_sdvo_encode {
+    u8 dvi_rev;
+    u8 hdmi_rev;
+} __attribute__ ((packed));
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
new file mode 100644 (file)
index 0000000..7be802b
--- /dev/null
@@ -0,0 +1,564 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+
+/*
+ * inline functions
+ */
+
+static inline u32
+psb_pipestat(int pipe)
+{
+       if (pipe == 0)
+               return PIPEASTAT;
+       if (pipe == 1)
+               return PIPEBSTAT;
+       if (pipe == 2)
+               return PIPECSTAT;
+       BUG();
+}
+
+static inline u32
+mid_pipe_event(int pipe)
+{
+       if (pipe == 0)
+               return _PSB_PIPEA_EVENT_FLAG;
+       if (pipe == 1)
+               return _MDFLD_PIPEB_EVENT_FLAG;
+       if (pipe == 2)
+               return _MDFLD_PIPEC_EVENT_FLAG;
+       BUG();
+}
+
+static inline u32
+mid_pipe_vsync(int pipe)
+{
+       if (pipe == 0)
+               return _PSB_VSYNC_PIPEA_FLAG;
+       if (pipe == 1)
+               return _PSB_VSYNC_PIPEB_FLAG;
+       if (pipe == 2)
+               return _MDFLD_PIPEC_VBLANK_FLAG;
+       BUG();
+}
+
+static inline u32
+mid_pipeconf(int pipe)
+{
+       if (pipe == 0)
+               return PIPEACONF;
+       if (pipe == 1)
+               return PIPEBCONF;
+       if (pipe == 2)
+               return PIPECCONF;
+       BUG();
+}
+
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+       if ((dev_priv->pipestat[pipe] & mask) != mask) {
+               u32 reg = psb_pipestat(pipe);
+               dev_priv->pipestat[pipe] |= mask;
+               /* Enable the interrupt, clear any pending status */
+               if (gma_power_begin(dev_priv->dev, false)) {
+                       u32 writeVal = PSB_RVDC32(reg);
+                       writeVal |= (mask | (mask >> 16));
+                       PSB_WVDC32(writeVal, reg);
+                       (void) PSB_RVDC32(reg);
+                       gma_power_end(dev_priv->dev);
+               }
+       }
+}
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+       if ((dev_priv->pipestat[pipe] & mask) != 0) {
+               u32 reg = psb_pipestat(pipe);
+               dev_priv->pipestat[pipe] &= ~mask;
+               if (gma_power_begin(dev_priv->dev, false)) {
+                       u32 writeVal = PSB_RVDC32(reg);
+                       writeVal &= ~mask;
+                       PSB_WVDC32(writeVal, reg);
+                       (void) PSB_RVDC32(reg);
+                       gma_power_end(dev_priv->dev);
+               }
+       }
+}
+
+void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+       if (gma_power_begin(dev_priv->dev, false)) {
+               u32 pipe_event = mid_pipe_event(pipe);
+               dev_priv->vdc_irq_mask |= pipe_event;
+               PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+               PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+               gma_power_end(dev_priv->dev);
+       }
+}
+
+void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+       if (dev_priv->pipestat[pipe] == 0) {
+               if (gma_power_begin(dev_priv->dev, false)) {
+                       u32 pipe_event = mid_pipe_event(pipe);
+                       dev_priv->vdc_irq_mask &= ~pipe_event;
+                       PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+                       PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+                       gma_power_end(dev_priv->dev);
+               }
+       }
+}
+
+/*
+ * Display controller interrupt handler for pipe events.
+ */
+static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *) dev->dev_private;
+
+       uint32_t pipe_stat_val = 0;
+       uint32_t pipe_stat_reg = psb_pipestat(pipe);
+       uint32_t pipe_enable = dev_priv->pipestat[pipe];
+       uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
+       uint32_t pipe_clear;
+       uint32_t i = 0;
+
+       spin_lock(&dev_priv->irqmask_lock);
+
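+       /* Read the raw pipe status and keep only the status bits (low half)
+        * whose matching enable bit (high half) is set. */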
+       pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
+       pipe_stat_val &= pipe_enable | pipe_status;
+       pipe_stat_val &= pipe_stat_val >> 16;
+
+       spin_unlock(&dev_priv->irqmask_lock);
+
+       /* Clear the 2nd level interrupt status bits
+        * Sometimes the bits are very sticky so we repeat until they unstick */
+       for (i = 0; i < 0xffff; i++) {
+               PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
+               pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
+
+               if (pipe_clear == 0)
+                       break;
+       }
+
+       if (pipe_clear)
+               dev_err(dev->dev,
+               "%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
+               __func__, pipe, PSB_RVDC32(pipe_stat_reg));
+
+       if (pipe_stat_val & PIPE_VBLANK_STATUS)
+               drm_handle_vblank(dev, pipe);
+
+       if (pipe_stat_val & PIPE_TE_STATUS)
+               drm_handle_vblank(dev, pipe);
+}
+
+/*
+ * Display controller interrupt handler.
+ */
+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
+{
+       if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
+               mid_pipe_event_handler(dev, 0);
+
+       if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
+               mid_pipe_event_handler(dev, 1);
+}
+
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+{
+       struct drm_device *dev = (struct drm_device *) arg;
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *) dev->dev_private;
+
+       uint32_t vdc_stat, dsp_int = 0, sgx_int = 0;
+       int handled = 0;
+
+       spin_lock(&dev_priv->irqmask_lock);
+
+       vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+
+       if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
+               dsp_int = 1;
+
+       /* FIXME: Handle Medfield
+       if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
+               dsp_int = 1;
+       */
+
+       if (vdc_stat & _PSB_IRQ_SGX_FLAG)
+               sgx_int = 1;
+
+       vdc_stat &= dev_priv->vdc_irq_mask;
+       spin_unlock(&dev_priv->irqmask_lock);
+
+       if (dsp_int && gma_power_is_on(dev)) {
+               psb_vdc_interrupt(dev, vdc_stat);
+               handled = 1;
+       }
+
+       if (sgx_int) {
+               /* Not expected - we have it masked, shut it up */
+               u32 s, s2;
+               s = PSB_RSGX32(PSB_CR_EVENT_STATUS);
+               s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
+               PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
+               PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
+               /* If s & _PSB_CE_TWOD_COMPLETE then 2D work has completed,
+                  but we may as well poll even if we add handling for that. */
+               handled = 1;
+       }
+
+       PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
+       (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
+       DRM_READMEMORYBARRIER();
+
+       if (!handled)
+               return IRQ_NONE;
+
+       return IRQ_HANDLED;
+}
+
+void psb_irq_preinstall(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+       if (gma_power_is_on(dev))
+               PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+       if (dev->vblank_enabled[0])
+               dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+       if (dev->vblank_enabled[1])
+               dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+       /* FIXME: Handle Medfield irq mask
+       if (dev->vblank_enabled[1])
+               dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
+       if (dev->vblank_enabled[2])
+               dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
+       */
+
+       /* This register is safe even if display island is off */
+       PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+int psb_irq_postinstall(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+       /* This register is safe even if display island is off */
+       PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+       PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+       if (dev->vblank_enabled[0])
+               psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+       else
+               psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+       if (dev->vblank_enabled[1])
+               psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+       else
+               psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+       if (dev->vblank_enabled[2])
+               psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+       else
+               psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+       return 0;
+}
+
+void psb_irq_uninstall(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+       PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+       if (dev->vblank_enabled[0])
+               psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+       if (dev->vblank_enabled[1])
+               psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+       if (dev->vblank_enabled[2])
+               psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+
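+       /* Leave only the non-display (SGX/MSVDX/Topaz) interrupt sources in
+        * the mask. */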
+       dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+                                 _PSB_IRQ_MSVDX_FLAG |
+                                 _LNC_IRQ_TOPAZ_FLAG;
+
+       /* These two registers are safe even if display island is off */
+       PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+       PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+       wmb();
+
+       /* This register is safe even if display island is off */
+       PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+void psb_irq_turn_on_dpst(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+               (struct drm_psb_private *) dev->dev_private;
+       u32 hist_reg;
+       u32 pwm_reg;
+
+       if (gma_power_begin(dev, false)) {
+               PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
+               hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+               PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
+               hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+               PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
+               pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+               PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
+                                               | PWM_PHASEIN_INT_ENABLE,
+                                                          PWM_CONTROL_LOGIC);
+               pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+               psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+               hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+               PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
+                                                       HISTOGRAM_INT_CONTROL);
+               pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+               PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
+                                                       PWM_CONTROL_LOGIC);
+
+               gma_power_end(dev);
+       }
+}
+
+int psb_irq_enable_dpst(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+               (struct drm_psb_private *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+       /* enable DPST */
+       mid_enable_pipe_event(dev_priv, 0);
+       psb_irq_turn_on_dpst(dev);
+
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+       return 0;
+}
+
+void psb_irq_turn_off_dpst(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *) dev->dev_private;
+       u32 hist_reg;
+       u32 pwm_reg;
+
+       if (gma_power_begin(dev, false)) {
+               PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
+               hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+               psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+               pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+               PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
+                                                       PWM_CONTROL_LOGIC);
+               pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+               gma_power_end(dev);
+       }
+}
+
+int psb_irq_disable_dpst(struct drm_device *dev)
+{
+       struct drm_psb_private *dev_priv =
+           (struct drm_psb_private *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+       mid_disable_pipe_event(dev_priv, 0);
+       psb_irq_turn_off_dpst(dev);
+
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+       return 0;
+}
+
+#ifdef PSB_FIXME
+static int psb_vblank_do_wait(struct drm_device *dev,
+                             unsigned int *sequence, atomic_t *counter)
+{
+       unsigned int cur_vblank;
+       int ret = 0;
+       DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+                   (((cur_vblank = atomic_read(counter))
+                     - *sequence) <= (1 << 23)));
+       *sequence = cur_vblank;
+
+       return ret;
+}
+#endif
+
+/*
+ * Enable the VBLANK interrupt for the given pipe
+ */
+int psb_enable_vblank(struct drm_device *dev, int pipe)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long irqflags;
+       uint32_t reg_val = 0;
+       uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+       if (gma_power_begin(dev, false)) {
+               reg_val = REG_READ(pipeconf_reg);
+               gma_power_end(dev);
+       }
+
+       if (!(reg_val & PIPEACONF_ENABLE))
+               return -EINVAL;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+       if (pipe == 0)
+               dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+       else if (pipe == 1)
+               dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+       PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+       PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+       psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+       return 0;
+}
+
+/*
+ * Disable the VBLANK interrupt for the given pipe
+ */
+void psb_disable_vblank(struct drm_device *dev, int pipe)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+       if (pipe == 0)
+               dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
+       else if (pipe == 1)
+               dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
+
+       PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+       PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+       psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+       spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+       uint32_t high_frame = PIPEAFRAMEHIGH;
+       uint32_t low_frame = PIPEAFRAMEPIXEL;
+       uint32_t pipeconf_reg = PIPEACONF;
+       uint32_t reg_val = 0;
+       uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
+
+       switch (pipe) {
+       case 0:
+               break;
+       case 1:
+               high_frame = PIPEBFRAMEHIGH;
+               low_frame = PIPEBFRAMEPIXEL;
+               pipeconf_reg = PIPEBCONF;
+               break;
+       case 2:
+               high_frame = PIPECFRAMEHIGH;
+               low_frame = PIPECFRAMEPIXEL;
+               pipeconf_reg = PIPECCONF;
+               break;
+       default:
+               dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
+               return 0;
+       }
+
+       if (!gma_power_begin(dev, false))
+               return 0;
+
+       reg_val = REG_READ(pipeconf_reg);
+
+       if (!(reg_val & PIPEACONF_ENABLE)) {
+               dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n",
+                                                               pipe);
+               goto psb_get_vblank_counter_exit;
+       }
+
+       /*
+        * High & low register fields aren't synchronized, so make sure
+        * we get a low value that's stable across two reads of the high
+        * register.
+        */
+       do {
+               high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+                        PIPE_FRAME_HIGH_SHIFT);
+               low =  ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+                       PIPE_FRAME_LOW_SHIFT);
+               high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+                        PIPE_FRAME_HIGH_SHIFT);
+       } while (high1 != high2);
+
+       count = (high1 << 8) | low;
+
+psb_get_vblank_counter_exit:
+
+       gma_power_end(dev);
+
+       return count;
+}
+
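Editorial aside (not part of the patch): the two-reads-of-the-high-register loop in psb_get_vblank_counter() above is the standard way to sample any counter that is split across unsynchronized HIGH/LOW registers. A minimal sketch of the same pattern, assuming a hypothetical read_reg() accessor and the 8-bit low field used here:

static u32 read_split_counter(u32 high_reg, u32 low_reg)
{
	u32 high1, high2, low;

	do {
		high1 = read_reg(high_reg);	/* sample the high word */
		low   = read_reg(low_reg);	/* low may wrap right here... */
		high2 = read_reg(high_reg);	/* ...so re-read high to detect it */
	} while (high1 != high2);		/* retry until both high reads agree */

	return (high1 << 8) | low;
}

If the low field rolls over between the two high reads they will differ and the loop retries, so the returned value is always a consistent snapshot.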
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
new file mode 100644 (file)
index 0000000..216fda3
--- /dev/null
@@ -0,0 +1,45 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _SYSIRQ_H_
+#define _SYSIRQ_H_
+
+#include <drm/drmP.h>
+
+bool sysirq_init(struct drm_device *dev);
+void sysirq_uninit(struct drm_device *dev);
+
+void psb_irq_preinstall(struct drm_device *dev);
+int  psb_irq_postinstall(struct drm_device *dev);
+void psb_irq_uninstall(struct drm_device *dev);
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+
+int psb_irq_enable_dpst(struct drm_device *dev);
+int psb_irq_disable_dpst(struct drm_device *dev);
+void psb_irq_turn_on_dpst(struct drm_device *dev);
+void psb_irq_turn_off_dpst(struct drm_device *dev);
+int  psb_enable_vblank(struct drm_device *dev, int pipe);
+void psb_disable_vblank(struct drm_device *dev, int pipe);
+u32  psb_get_vblank_counter(struct drm_device *dev, int pipe);
+
+#endif /* _SYSIRQ_H_ */
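Editorial aside (not part of the patch): a sketch of how these hooks would typically be wired into the struct drm_driver of this era. The field names are assumptions based on the 2012 DRM core, and the real gma500 hookup lives in psb_drv.c:

static struct drm_driver psb_driver_sketch = {
	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET,
	.irq_preinstall		= psb_irq_preinstall,
	.irq_postinstall	= psb_irq_postinstall,
	.irq_uninstall		= psb_irq_uninstall,
	.irq_handler		= psb_irq_handler,
	.enable_vblank		= psb_enable_vblank,
	.disable_vblank		= psb_disable_vblank,
	.get_vblank_counter	= psb_get_vblank_counter,
	/* .fops, .ioctls, name/desc/date etc. omitted for brevity */
};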
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
new file mode 100644 (file)
index 0000000..b867aab
--- /dev/null
@@ -0,0 +1,88 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <linux/spinlock.h>
+
+static void psb_lid_timer_func(unsigned long data)
+{
+       struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
+       struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+       struct timer_list *lid_timer = &dev_priv->lid_timer;
+       unsigned long irq_flags;
+       u32 *lid_state = dev_priv->lid_state;
+       u32 pp_status;
+
+       if (readl(lid_state) == dev_priv->lid_last_state)
+               goto lid_timer_schedule;
+
+       if ((readl(lid_state)) & 0x01) {
+               /*lid state is open*/
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while ((pp_status & PP_ON) == 0);
+
+               /* FIXME: restore the backlight level in use before the lid closed */
+               psb_intel_lvds_set_brightness(dev, 100);
+       } else {
+               psb_intel_lvds_set_brightness(dev, 0);
+
+               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
+               do {
+                       pp_status = REG_READ(PP_STATUS);
+               } while (pp_status & PP_ON);
+       }
+       dev_priv->lid_last_state = readl(lid_state);
+
+lid_timer_schedule:
+       spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+       if (!timer_pending(lid_timer)) {
+               lid_timer->expires = jiffies + PSB_LID_DELAY;
+               add_timer(lid_timer);
+       }
+       spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+void psb_lid_timer_init(struct drm_psb_private *dev_priv)
+{
+       struct timer_list *lid_timer = &dev_priv->lid_timer;
+       unsigned long irq_flags;
+
+       spin_lock_init(&dev_priv->lid_lock);
+       spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+
+       init_timer(lid_timer);
+
+       lid_timer->data = (unsigned long)dev_priv;
+       lid_timer->function = psb_lid_timer_func;
+       lid_timer->expires = jiffies + PSB_LID_DELAY;
+
+       add_timer(lid_timer);
+       spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
+{
+       del_timer_sync(&dev_priv->lid_timer);
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_reg.h b/drivers/gpu/drm/gma500/psb_reg.h
new file mode 100644 (file)
index 0000000..b81c7c1
--- /dev/null
@@ -0,0 +1,582 @@
+/**************************************************************************
+ *
+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_REG_H_
+#define _PSB_REG_H_
+
+#define PSB_CR_CLKGATECTL              0x0000
+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG         (1 << 24)
+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT       (20)
+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK                (0x3 << 20)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT       (16)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK                (0x3 << 16)
+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT                (12)
+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK         (0x3 << 12)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT       (8)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK                (0x3 << 8)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT       (4)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK                (0x3 << 4)
+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT                (0)
+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK         (0x3 << 0)
+#define _PSB_C_CLKGATECTL_CLKG_ENABLED         (0)
+#define _PSB_C_CLKGATECTL_CLKG_DISABLED                (1)
+#define _PSB_C_CLKGATECTL_CLKG_AUTO            (2)
+
+#define PSB_CR_CORE_ID                 0x0010
+#define _PSB_CC_ID_ID_SHIFT                    (16)
+#define _PSB_CC_ID_ID_MASK                     (0xFFFF << 16)
+#define _PSB_CC_ID_CONFIG_SHIFT                        (0)
+#define _PSB_CC_ID_CONFIG_MASK                 (0xFFFF << 0)
+
+#define PSB_CR_CORE_REVISION           0x0014
+#define _PSB_CC_REVISION_DESIGNER_SHIFT                (24)
+#define _PSB_CC_REVISION_DESIGNER_MASK         (0xFF << 24)
+#define _PSB_CC_REVISION_MAJOR_SHIFT           (16)
+#define _PSB_CC_REVISION_MAJOR_MASK            (0xFF << 16)
+#define _PSB_CC_REVISION_MINOR_SHIFT           (8)
+#define _PSB_CC_REVISION_MINOR_MASK            (0xFF << 8)
+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT     (0)
+#define _PSB_CC_REVISION_MAINTENANCE_MASK      (0xFF << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD1     0x0018
+
+#define PSB_CR_SOFT_RESET              0x0080
+#define _PSB_CS_RESET_TSP_RESET                (1 << 6)
+#define _PSB_CS_RESET_ISP_RESET                (1 << 5)
+#define _PSB_CS_RESET_USE_RESET                (1 << 4)
+#define _PSB_CS_RESET_TA_RESET         (1 << 3)
+#define _PSB_CS_RESET_DPM_RESET                (1 << 2)
+#define _PSB_CS_RESET_TWOD_RESET       (1 << 1)
+#define _PSB_CS_RESET_BIF_RESET                        (1 << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD2     0x001C
+
+#define PSB_CR_EVENT_HOST_ENABLE2      0x0110
+
+#define PSB_CR_EVENT_STATUS2           0x0118
+
+#define PSB_CR_EVENT_HOST_CLEAR2       0x0114
+#define _PSB_CE2_BIF_REQUESTER_FAULT           (1 << 4)
+
+#define PSB_CR_EVENT_STATUS            0x012C
+
+#define PSB_CR_EVENT_HOST_ENABLE       0x0130
+
+#define PSB_CR_EVENT_HOST_CLEAR                0x0134
+#define _PSB_CE_MASTER_INTERRUPT               (1 << 31)
+#define _PSB_CE_TA_DPM_FAULT                   (1 << 28)
+#define _PSB_CE_TWOD_COMPLETE                  (1 << 27)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS          (1 << 25)
+#define _PSB_CE_DPM_TA_MEM_FREE                        (1 << 24)
+#define _PSB_CE_PIXELBE_END_RENDER             (1 << 18)
+#define _PSB_CE_SW_EVENT                       (1 << 14)
+#define _PSB_CE_TA_FINISHED                    (1 << 13)
+#define _PSB_CE_TA_TERMINATE                   (1 << 12)
+#define _PSB_CE_DPM_REACHED_MEM_THRESH         (1 << 3)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL          (1 << 2)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT           (1 << 1)
+#define _PSB_CE_DPM_3D_MEM_FREE                        (1 << 0)
+
+
+#define PSB_USE_OFFSET_MASK            0x0007FFFF
+#define PSB_USE_OFFSET_SIZE            (PSB_USE_OFFSET_MASK + 1)
+#define PSB_CR_USE_CODE_BASE0          0x0A0C
+#define PSB_CR_USE_CODE_BASE1          0x0A10
+#define PSB_CR_USE_CODE_BASE2          0x0A14
+#define PSB_CR_USE_CODE_BASE3          0x0A18
+#define PSB_CR_USE_CODE_BASE4          0x0A1C
+#define PSB_CR_USE_CODE_BASE5          0x0A20
+#define PSB_CR_USE_CODE_BASE6          0x0A24
+#define PSB_CR_USE_CODE_BASE7          0x0A28
+#define PSB_CR_USE_CODE_BASE8          0x0A2C
+#define PSB_CR_USE_CODE_BASE9          0x0A30
+#define PSB_CR_USE_CODE_BASE10         0x0A34
+#define PSB_CR_USE_CODE_BASE11         0x0A38
+#define PSB_CR_USE_CODE_BASE12         0x0A3C
+#define PSB_CR_USE_CODE_BASE13         0x0A40
+#define PSB_CR_USE_CODE_BASE14         0x0A44
+#define PSB_CR_USE_CODE_BASE15         0x0A48
+#define PSB_CR_USE_CODE_BASE(_i)       (0x0A0C + ((_i) << 2))
+#define _PSB_CUC_BASE_DM_SHIFT                 (25)
+#define _PSB_CUC_BASE_DM_MASK                  (0x3 << 25)
+#define _PSB_CUC_BASE_ADDR_SHIFT               (0)     /* 1024-bit aligned address? */
+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT          (7)
+#define _PSB_CUC_BASE_ADDR_MASK                        (0x1FFFFFF << 0)
+#define _PSB_CUC_DM_VERTEX                     (0)
+#define _PSB_CUC_DM_PIXEL                      (1)
+#define _PSB_CUC_DM_RESERVED                   (2)
+#define _PSB_CUC_DM_EDM                                (3)
+
+#define PSB_CR_PDS_EXEC_BASE           0x0AB8
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT       (20)    /* 1MB aligned address */
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT  (20)
+
+#define PSB_CR_EVENT_KICKER            0x0AC4
+#define _PSB_CE_KICKER_ADDRESS_SHIFT           (4)     /* 128-bit aligned address */
+
+#define PSB_CR_EVENT_KICK              0x0AC8
+#define _PSB_CE_KICK_NOW                       (1 << 0)
+
+#define PSB_CR_BIF_DIR_LIST_BASE1      0x0C38
+
+#define PSB_CR_BIF_CTRL                        0x0C00
+#define _PSB_CB_CTRL_CLEAR_FAULT               (1 << 4)
+#define _PSB_CB_CTRL_INVALDC                   (1 << 3)
+#define _PSB_CB_CTRL_FLUSH                     (1 << 2)
+
+#define PSB_CR_BIF_INT_STAT            0x0C04
+
+#define PSB_CR_BIF_FAULT               0x0C08
+#define _PSB_CBI_STAT_PF_N_RW                  (1 << 14)
+#define _PSB_CBI_STAT_FAULT_SHIFT              (0)
+#define _PSB_CBI_STAT_FAULT_MASK               (0x3FFF << 0)
+#define _PSB_CBI_STAT_FAULT_CACHE              (1 << 1)
+#define _PSB_CBI_STAT_FAULT_TA                 (1 << 2)
+#define _PSB_CBI_STAT_FAULT_VDM                        (1 << 3)
+#define _PSB_CBI_STAT_FAULT_2D                 (1 << 4)
+#define _PSB_CBI_STAT_FAULT_PBE                        (1 << 5)
+#define _PSB_CBI_STAT_FAULT_TSP                        (1 << 6)
+#define _PSB_CBI_STAT_FAULT_ISP                        (1 << 7)
+#define _PSB_CBI_STAT_FAULT_USSEPDS            (1 << 8)
+#define _PSB_CBI_STAT_FAULT_HOST               (1 << 9)
+
+#define PSB_CR_BIF_BANK0               0x0C78
+#define PSB_CR_BIF_BANK1               0x0C7C
+#define PSB_CR_BIF_DIR_LIST_BASE0      0x0C84
+#define PSB_CR_BIF_TWOD_REQ_BASE       0x0C88
+#define PSB_CR_BIF_3D_REQ_BASE         0x0CAC
+
+#define PSB_CR_2D_SOCIF                        0x0E18
+#define _PSB_C2_SOCIF_FREESPACE_SHIFT          (0)
+#define _PSB_C2_SOCIF_FREESPACE_MASK           (0xFF << 0)
+#define _PSB_C2_SOCIF_EMPTY                    (0x80 << 0)
+
+#define PSB_CR_2D_BLIT_STATUS          0x0E04
+#define _PSB_C2B_STATUS_BUSY                   (1 << 24)
+#define _PSB_C2B_STATUS_COMPLETE_SHIFT         (0)
+#define _PSB_C2B_STATUS_COMPLETE_MASK          (0xFFFFFF << 0)
+
+/*
+ * 2D defs.
+ */
+
+/*
+ * 2D Slave Port Data : Block Header's Object Type
+ */
+
+#define        PSB_2D_CLIP_BH                  (0x00000000)
+#define        PSB_2D_PAT_BH                   (0x10000000)
+#define        PSB_2D_CTRL_BH                  (0x20000000)
+#define        PSB_2D_SRC_OFF_BH               (0x30000000)
+#define        PSB_2D_MASK_OFF_BH              (0x40000000)
+#define        PSB_2D_RESERVED1_BH             (0x50000000)
+#define        PSB_2D_RESERVED2_BH             (0x60000000)
+#define        PSB_2D_FENCE_BH                 (0x70000000)
+#define        PSB_2D_BLIT_BH                  (0x80000000)
+#define        PSB_2D_SRC_SURF_BH              (0x90000000)
+#define        PSB_2D_DST_SURF_BH              (0xA0000000)
+#define        PSB_2D_PAT_SURF_BH              (0xB0000000)
+#define        PSB_2D_SRC_PAL_BH               (0xC0000000)
+#define        PSB_2D_PAT_PAL_BH               (0xD0000000)
+#define        PSB_2D_MASK_SURF_BH             (0xE0000000)
+#define        PSB_2D_FLUSH_BH                 (0xF0000000)
+
+/*
+ * Clip Definition block (PSB_2D_CLIP_BH)
+ */
+#define PSB_2D_CLIPCOUNT_MAX           (1)
+#define PSB_2D_CLIPCOUNT_MASK          (0x00000000)
+#define PSB_2D_CLIPCOUNT_CLRMASK       (0xFFFFFFFF)
+#define PSB_2D_CLIPCOUNT_SHIFT         (0)
+/* clip rectangle min & max */
+#define PSB_2D_CLIP_XMAX_MASK          (0x00FFF000)
+#define PSB_2D_CLIP_XMAX_CLRMASK       (0xFF000FFF)
+#define PSB_2D_CLIP_XMAX_SHIFT         (12)
+#define PSB_2D_CLIP_XMIN_MASK          (0x00000FFF)
+#define PSB_2D_CLIP_XMIN_CLRMASK       (0x00FFF000)
+#define PSB_2D_CLIP_XMIN_SHIFT         (0)
+/* clip rectangle offset */
+#define PSB_2D_CLIP_YMAX_MASK          (0x00FFF000)
+#define PSB_2D_CLIP_YMAX_CLRMASK       (0xFF000FFF)
+#define PSB_2D_CLIP_YMAX_SHIFT         (12)
+#define PSB_2D_CLIP_YMIN_MASK          (0x00000FFF)
+#define PSB_2D_CLIP_YMIN_CLRMASK       (0x00FFF000)
+#define PSB_2D_CLIP_YMIN_SHIFT         (0)
+
+/*
+ * Pattern Control (PSB_2D_PAT_BH)
+ */
+#define PSB_2D_PAT_HEIGHT_MASK         (0x0000001F)
+#define PSB_2D_PAT_HEIGHT_SHIFT                (0)
+#define PSB_2D_PAT_WIDTH_MASK          (0x000003E0)
+#define PSB_2D_PAT_WIDTH_SHIFT         (5)
+#define PSB_2D_PAT_YSTART_MASK         (0x00007C00)
+#define PSB_2D_PAT_YSTART_SHIFT                (10)
+#define PSB_2D_PAT_XSTART_MASK         (0x000F8000)
+#define PSB_2D_PAT_XSTART_SHIFT                (15)
+
+/*
+ * 2D Control block (PSB_2D_CTRL_BH)
+ */
+/* Present Flags */
+#define PSB_2D_SRCCK_CTRL              (0x00000001)
+#define PSB_2D_DSTCK_CTRL              (0x00000002)
+#define PSB_2D_ALPHA_CTRL              (0x00000004)
+/* Colour Key Colour (SRC/DST)*/
+#define PSB_2D_CK_COL_MASK             (0xFFFFFFFF)
+#define PSB_2D_CK_COL_CLRMASK          (0x00000000)
+#define PSB_2D_CK_COL_SHIFT            (0)
+/* Colour Key Mask (SRC/DST)*/
+#define PSB_2D_CK_MASK_MASK            (0xFFFFFFFF)
+#define PSB_2D_CK_MASK_CLRMASK         (0x00000000)
+#define PSB_2D_CK_MASK_SHIFT           (0)
+/* Alpha Control (Alpha/RGB)*/
+#define PSB_2D_GBLALPHA_MASK           (0x000FF000)
+#define PSB_2D_GBLALPHA_CLRMASK                (0xFFF00FFF)
+#define PSB_2D_GBLALPHA_SHIFT          (12)
+#define PSB_2D_SRCALPHA_OP_MASK                (0x00700000)
+#define PSB_2D_SRCALPHA_OP_CLRMASK     (0xFF8FFFFF)
+#define PSB_2D_SRCALPHA_OP_SHIFT       (20)
+#define PSB_2D_SRCALPHA_OP_ONE         (0x00000000)
+#define PSB_2D_SRCALPHA_OP_SRC         (0x00100000)
+#define PSB_2D_SRCALPHA_OP_DST         (0x00200000)
+#define PSB_2D_SRCALPHA_OP_SG          (0x00300000)
+#define PSB_2D_SRCALPHA_OP_DG          (0x00400000)
+#define PSB_2D_SRCALPHA_OP_GBL         (0x00500000)
+#define PSB_2D_SRCALPHA_OP_ZERO                (0x00600000)
+#define PSB_2D_SRCALPHA_INVERT         (0x00800000)
+#define PSB_2D_SRCALPHA_INVERT_CLR     (0xFF7FFFFF)
+#define PSB_2D_DSTALPHA_OP_MASK                (0x07000000)
+#define PSB_2D_DSTALPHA_OP_CLRMASK     (0xF8FFFFFF)
+#define PSB_2D_DSTALPHA_OP_SHIFT       (24)
+#define PSB_2D_DSTALPHA_OP_ONE         (0x00000000)
+#define PSB_2D_DSTALPHA_OP_SRC         (0x01000000)
+#define PSB_2D_DSTALPHA_OP_DST         (0x02000000)
+#define PSB_2D_DSTALPHA_OP_SG          (0x03000000)
+#define PSB_2D_DSTALPHA_OP_DG          (0x04000000)
+#define PSB_2D_DSTALPHA_OP_GBL         (0x05000000)
+#define PSB_2D_DSTALPHA_OP_ZERO                (0x06000000)
+#define PSB_2D_DSTALPHA_INVERT         (0x08000000)
+#define PSB_2D_DSTALPHA_INVERT_CLR     (0xF7FFFFFF)
+
+#define PSB_2D_PRE_MULTIPLICATION_ENABLE       (0x10000000)
+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK      (0xEFFFFFFF)
+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE                (0x20000000)
+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK       (0xDFFFFFFF)
+
+/*
+ *Source Offset (PSB_2D_SRC_OFF_BH)
+ */
+#define PSB_2D_SRCOFF_XSTART_MASK      ((0x00000FFF) << 12)
+#define PSB_2D_SRCOFF_XSTART_SHIFT     (12)
+#define PSB_2D_SRCOFF_YSTART_MASK      (0x00000FFF)
+#define PSB_2D_SRCOFF_YSTART_SHIFT     (0)
+
+/*
+ * Mask Offset (PSB_2D_MASK_OFF_BH)
+ */
+#define PSB_2D_MASKOFF_XSTART_MASK     ((0x00000FFF) << 12)
+#define PSB_2D_MASKOFF_XSTART_SHIFT    (12)
+#define PSB_2D_MASKOFF_YSTART_MASK     (0x00000FFF)
+#define PSB_2D_MASKOFF_YSTART_SHIFT    (0)
+
+/*
+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
+ */
+
+/*
+ *Blit Rectangle (PSB_2D_BLIT_BH)
+ */
+
+#define PSB_2D_ROT_MASK                        (3 << 25)
+#define PSB_2D_ROT_CLRMASK             (~PSB_2D_ROT_MASK)
+#define PSB_2D_ROT_NONE                        (0 << 25)
+#define PSB_2D_ROT_90DEGS              (1 << 25)
+#define PSB_2D_ROT_180DEGS             (2 << 25)
+#define PSB_2D_ROT_270DEGS             (3 << 25)
+
+#define PSB_2D_COPYORDER_MASK          (3 << 23)
+#define PSB_2D_COPYORDER_CLRMASK       (~PSB_2D_COPYORDER_MASK)
+#define PSB_2D_COPYORDER_TL2BR         (0 << 23)
+#define PSB_2D_COPYORDER_BR2TL         (1 << 23)
+#define PSB_2D_COPYORDER_TR2BL         (2 << 23)
+#define PSB_2D_COPYORDER_BL2TR         (3 << 23)
+
+#define PSB_2D_DSTCK_CLRMASK           (0xFF9FFFFF)
+#define PSB_2D_DSTCK_DISABLE           (0x00000000)
+#define PSB_2D_DSTCK_PASS              (0x00200000)
+#define PSB_2D_DSTCK_REJECT            (0x00400000)
+
+#define PSB_2D_SRCCK_CLRMASK           (0xFFE7FFFF)
+#define PSB_2D_SRCCK_DISABLE           (0x00000000)
+#define PSB_2D_SRCCK_PASS              (0x00080000)
+#define PSB_2D_SRCCK_REJECT            (0x00100000)
+
+#define PSB_2D_CLIP_ENABLE             (0x00040000)
+
+#define PSB_2D_ALPHA_ENABLE            (0x00020000)
+
+#define PSB_2D_PAT_CLRMASK             (0xFFFEFFFF)
+#define PSB_2D_PAT_MASK                        (0x00010000)
+#define PSB_2D_USE_PAT                 (0x00010000)
+#define PSB_2D_USE_FILL                        (0x00000000)
+/*
+ * Tungsten Graphics note on rop codes: If rop A and rop B are
+ * identical, the mask surface will not be read and need not be
+ * set up.
+ */
+
+#define PSB_2D_ROP3B_MASK              (0x0000FF00)
+#define PSB_2D_ROP3B_CLRMASK           (0xFFFF00FF)
+#define PSB_2D_ROP3B_SHIFT             (8)
+/* rop code A */
+#define PSB_2D_ROP3A_MASK              (0x000000FF)
+#define PSB_2D_ROP3A_CLRMASK           (0xFFFFFF00)
+#define PSB_2D_ROP3A_SHIFT             (0)
+
+#define PSB_2D_ROP4_MASK               (0x0000FFFF)
+/*
+ *     DWORD0: (Only pass if Pattern control == Use Fill Colour)
+ *     Fill Colour RGBA8888
+ */
+#define PSB_2D_FILLCOLOUR_MASK         (0xFFFFFFFF)
+#define PSB_2D_FILLCOLOUR_SHIFT                (0)
+/*
+ *     DWORD1: (Always Present)
+ *     X Start (Dest)
+ *     Y Start (Dest)
+ */
+#define PSB_2D_DST_XSTART_MASK         (0x00FFF000)
+#define PSB_2D_DST_XSTART_CLRMASK      (0xFF000FFF)
+#define PSB_2D_DST_XSTART_SHIFT                (12)
+#define PSB_2D_DST_YSTART_MASK         (0x00000FFF)
+#define PSB_2D_DST_YSTART_CLRMASK      (0xFFFFF000)
+#define PSB_2D_DST_YSTART_SHIFT                (0)
+/*
+ *     DWORD2: (Always Present)
+ *     X Size (Dest)
+ *     Y Size (Dest)
+ */
+#define PSB_2D_DST_XSIZE_MASK          (0x00FFF000)
+#define PSB_2D_DST_XSIZE_CLRMASK       (0xFF000FFF)
+#define PSB_2D_DST_XSIZE_SHIFT         (12)
+#define PSB_2D_DST_YSIZE_MASK          (0x00000FFF)
+#define PSB_2D_DST_YSIZE_CLRMASK       (0xFFFFF000)
+#define PSB_2D_DST_YSIZE_SHIFT         (0)
+
+/*
+ * Source Surface (PSB_2D_SRC_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_SRC_FORMAT_MASK         (0x00078000)
+#define PSB_2D_SRC_1_PAL               (0x00000000)
+#define PSB_2D_SRC_2_PAL               (0x00008000)
+#define PSB_2D_SRC_4_PAL               (0x00010000)
+#define PSB_2D_SRC_8_PAL               (0x00018000)
+#define PSB_2D_SRC_8_ALPHA             (0x00020000)
+#define PSB_2D_SRC_4_ALPHA             (0x00028000)
+#define PSB_2D_SRC_332RGB              (0x00030000)
+#define PSB_2D_SRC_4444ARGB            (0x00038000)
+#define PSB_2D_SRC_555RGB              (0x00040000)
+#define PSB_2D_SRC_1555ARGB            (0x00048000)
+#define PSB_2D_SRC_565RGB              (0x00050000)
+#define PSB_2D_SRC_0888ARGB            (0x00058000)
+#define PSB_2D_SRC_8888ARGB            (0x00060000)
+#define PSB_2D_SRC_8888UYVY            (0x00068000)
+#define PSB_2D_SRC_RESERVED            (0x00070000)
+#define PSB_2D_SRC_1555ARGB_LOOKUP     (0x00078000)
+
+
+#define PSB_2D_SRC_STRIDE_MASK         (0x00007FFF)
+#define PSB_2D_SRC_STRIDE_CLRMASK      (0xFFFF8000)
+#define PSB_2D_SRC_STRIDE_SHIFT                (0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_SRC_ADDR_MASK           (0x0FFFFFFC)
+#define PSB_2D_SRC_ADDR_CLRMASK                (0x00000003)
+#define PSB_2D_SRC_ADDR_SHIFT          (2)
+#define PSB_2D_SRC_ADDR_ALIGNSHIFT     (2)
+
+/*
+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
+ */
+/*
+ *  WORD 0
+ */
+
+#define PSB_2D_PAT_FORMAT_MASK         (0x00078000)
+#define PSB_2D_PAT_1_PAL               (0x00000000)
+#define PSB_2D_PAT_2_PAL               (0x00008000)
+#define PSB_2D_PAT_4_PAL               (0x00010000)
+#define PSB_2D_PAT_8_PAL               (0x00018000)
+#define PSB_2D_PAT_8_ALPHA             (0x00020000)
+#define PSB_2D_PAT_4_ALPHA             (0x00028000)
+#define PSB_2D_PAT_332RGB              (0x00030000)
+#define PSB_2D_PAT_4444ARGB            (0x00038000)
+#define PSB_2D_PAT_555RGB              (0x00040000)
+#define PSB_2D_PAT_1555ARGB            (0x00048000)
+#define PSB_2D_PAT_565RGB              (0x00050000)
+#define PSB_2D_PAT_0888ARGB            (0x00058000)
+#define PSB_2D_PAT_8888ARGB            (0x00060000)
+
+#define PSB_2D_PAT_STRIDE_MASK         (0x00007FFF)
+#define PSB_2D_PAT_STRIDE_CLRMASK      (0xFFFF8000)
+#define PSB_2D_PAT_STRIDE_SHIFT                (0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_PAT_ADDR_MASK           (0x0FFFFFFC)
+#define PSB_2D_PAT_ADDR_CLRMASK                (0x00000003)
+#define PSB_2D_PAT_ADDR_SHIFT          (2)
+#define PSB_2D_PAT_ADDR_ALIGNSHIFT     (2)
+
+/*
+ * Destination Surface (PSB_2D_DST_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_DST_FORMAT_MASK         (0x00078000)
+#define PSB_2D_DST_332RGB              (0x00030000)
+#define PSB_2D_DST_4444ARGB            (0x00038000)
+#define PSB_2D_DST_555RGB              (0x00040000)
+#define PSB_2D_DST_1555ARGB            (0x00048000)
+#define PSB_2D_DST_565RGB              (0x00050000)
+#define PSB_2D_DST_0888ARGB            (0x00058000)
+#define PSB_2D_DST_8888ARGB            (0x00060000)
+#define PSB_2D_DST_8888AYUV            (0x00070000)
+
+#define PSB_2D_DST_STRIDE_MASK         (0x00007FFF)
+#define PSB_2D_DST_STRIDE_CLRMASK      (0xFFFF8000)
+#define PSB_2D_DST_STRIDE_SHIFT                (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_DST_ADDR_MASK           (0x0FFFFFFC)
+#define PSB_2D_DST_ADDR_CLRMASK                (0x00000003)
+#define PSB_2D_DST_ADDR_SHIFT          (2)
+#define PSB_2D_DST_ADDR_ALIGNSHIFT     (2)
+
+/*
+ * Mask Surface (PSB_2D_MASK_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+#define PSB_2D_MASK_STRIDE_MASK                (0x00007FFF)
+#define PSB_2D_MASK_STRIDE_CLRMASK     (0xFFFF8000)
+#define PSB_2D_MASK_STRIDE_SHIFT       (0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_MASK_ADDR_MASK          (0x0FFFFFFC)
+#define PSB_2D_MASK_ADDR_CLRMASK       (0x00000003)
+#define PSB_2D_MASK_ADDR_SHIFT         (2)
+#define PSB_2D_MASK_ADDR_ALIGNSHIFT    (2)
+
+/*
+ * Source Palette (PSB_2D_SRC_PAL_BH)
+ */
+
+#define PSB_2D_SRCPAL_ADDR_SHIFT       (0)
+#define PSB_2D_SRCPAL_ADDR_CLRMASK     (0xF0000007)
+#define PSB_2D_SRCPAL_ADDR_MASK                (0x0FFFFFF8)
+#define PSB_2D_SRCPAL_BYTEALIGN                (1024)
+
+/*
+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
+ */
+
+#define PSB_2D_PATPAL_ADDR_SHIFT       (0)
+#define PSB_2D_PATPAL_ADDR_CLRMASK     (0xF0000007)
+#define PSB_2D_PATPAL_ADDR_MASK                (0x0FFFFFF8)
+#define PSB_2D_PATPAL_BYTEALIGN                (1024)
+
+/*
+ * Rop3 Codes (2 LS bytes)
+ */
+
+#define PSB_2D_ROP3_SRCCOPY            (0xCCCC)
+#define PSB_2D_ROP3_PATCOPY            (0xF0F0)
+#define PSB_2D_ROP3_WHITENESS          (0xFFFF)
+#define PSB_2D_ROP3_BLACKNESS          (0x0000)
+#define PSB_2D_ROP3_SRC                        (0xCC)
+#define PSB_2D_ROP3_PAT                        (0xF0)
+#define PSB_2D_ROP3_DST                        (0xAA)
+
+/*
+ * Sizes.
+ */
+
+#define PSB_SCENE_HW_COOKIE_SIZE       16
+#define PSB_TA_MEM_HW_COOKIE_SIZE      16
+
+/*
+ * Scene stuff.
+ */
+
+#define PSB_NUM_HW_SCENES              2
+
+/*
+ * Scheduler completion actions.
+ */
+
+#define PSB_RASTER_BLOCK               0
+#define PSB_RASTER                     1
+#define PSB_RETURN                     2
+#define PSB_TA                         3
+
+/* Power management */
+#define PSB_PUNIT_PORT                 0x04
+#define PSB_OSPMBA                     0x78
+#define PSB_APMBA                      0x7a
+#define PSB_APM_CMD                    0x0
+#define PSB_APM_STS                    0x04
+#define PSB_PWRGT_VID_ENC_MASK         0x30
+#define PSB_PWRGT_VID_DEC_MASK         0xc
+#define PSB_PWRGT_GL3_MASK             0xc0
+
+#define PSB_PM_SSC                     0x20
+#define PSB_PM_SSS                     0x30
+#define PSB_PWRGT_DISPLAY_MASK         0xc /*on a different BA than video/gfx*/
+#define MDFLD_PWRGT_DISPLAY_A_CNTR     0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_CNTR     0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_CNTR     0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_CNTR     0x000c0000
+#define MDFLD_PWRGT_DISPLAY_CNTR    (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
+/* Display SSS register bits are different in A0 vs. B0 */
+#define PSB_PWRGT_GFX_MASK             0x3
+#define MDFLD_PWRGT_DISPLAY_A_STS      0x000000c0
+#define MDFLD_PWRGT_DISPLAY_B_STS      0x00000300
+#define MDFLD_PWRGT_DISPLAY_C_STS      0x00000c00
+#define PSB_PWRGT_GFX_MASK_B0          0xc3
+#define MDFLD_PWRGT_DISPLAY_A_STS_B0   0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_STS_B0   0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_STS_B0   0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_STS      0x000c0000
+#define MDFLD_PWRGT_DISPLAY_STS_A0    (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000c0fc0 */
+#define MDFLD_PWRGT_DISPLAY_STS_B0    (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
+#endif
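Editorial aside (not part of the patch): the block-header and blit definitions above describe a small command language for the 2D slave port. A sketch of a solid-colour fill built from them, following the DWORD0/1/2 layout notes; emit_2d() is a hypothetical stand-in for whatever queues words to the hardware, and the destination-surface block that must precede the blit is only indicated by a comment:

static void psb_2d_fill_sketch(u32 x, u32 y, u32 w, u32 h, u32 argb)
{
	/* a PSB_2D_DST_SURF_BH block (format, stride, base address) goes first */

	emit_2d(PSB_2D_BLIT_BH |		/* blit rectangle block header */
		PSB_2D_ROT_NONE |
		PSB_2D_COPYORDER_TL2BR |
		PSB_2D_DSTCK_DISABLE |
		PSB_2D_SRCCK_DISABLE |
		PSB_2D_USE_FILL |		/* pattern control: use fill colour */
		PSB_2D_ROP3_PATCOPY);		/* rop A == rop B, so no mask surface needed */
	emit_2d(argb);						/* DWORD0: fill colour */
	emit_2d((x << PSB_2D_DST_XSTART_SHIFT) |
		(y << PSB_2D_DST_YSTART_SHIFT));		/* DWORD1: dest start */
	emit_2d((w << PSB_2D_DST_XSIZE_SHIFT) |
		(h << PSB_2D_DST_YSIZE_SHIFT));			/* DWORD2: dest size */
}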
index 8f371e8..f7c17b2 100644 (file)
@@ -222,8 +222,6 @@ static int i810_dma_cleanup(struct drm_device *dev)
                        pci_free_consistent(dev->pdev, PAGE_SIZE,
                                            dev_priv->hw_status_page,
                                            dev_priv->dma_status_page);
-                       /* Need to rewrite hardware status page */
-                       I810_WRITE(0x02080, 0x1ffff000);
                }
                kfree(dev->dev_private);
                dev->dev_private = NULL;
@@ -888,7 +886,7 @@ static int i810_flush_queue(struct drm_device *dev)
 }
 
 /* Must be called with the lock held */
-static void i810_reclaim_buffers(struct drm_device *dev,
+void i810_driver_reclaim_buffers(struct drm_device *dev,
                                 struct drm_file *file_priv)
 {
        struct drm_device_dma *dma = dev->dma;
@@ -1225,12 +1223,17 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
                if (dev_priv->page_flipping)
                        i810_do_cleanup_pageflip(dev);
        }
-}
 
-void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
-                                       struct drm_file *file_priv)
-{
-       i810_reclaim_buffers(dev, file_priv);
+       if (file_priv->master && file_priv->master->lock.hw_lock) {
+               drm_idlelock_take(&file_priv->master->lock);
+               i810_driver_reclaim_buffers(dev, file_priv);
+               drm_idlelock_release(&file_priv->master->lock);
+       } else {
+               /* master disappeared, clean up stuff anyway and hope nothing
+                * goes wrong */
+               i810_driver_reclaim_buffers(dev, file_priv);
+       }
+
 }
 
 int i810_driver_dma_quiescent(struct drm_device *dev)
index d4266bd..053f1ee 100644 (file)
@@ -43,6 +43,17 @@ static struct pci_device_id pciidlist[] = {
        i810_PCI_IDS
 };
 
+static const struct file_operations i810_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
@@ -52,20 +63,9 @@ static struct drm_driver driver = {
        .lastclose = i810_driver_lastclose,
        .preclose = i810_driver_preclose,
        .device_is_agp = i810_driver_device_is_agp,
-       .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
        .dma_quiescent = i810_driver_dma_quiescent,
        .ioctls = i810_ioctls,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = drm_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .llseek = noop_llseek,
-       },
-
+       .fops = &i810_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index c9339f4..6e0acad 100644 (file)
@@ -116,14 +116,12 @@ typedef struct drm_i810_private {
 
                                /* i810_dma.c */
 extern int i810_driver_dma_quiescent(struct drm_device *dev);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
-                                              struct drm_file *file_priv);
+void i810_driver_reclaim_buffers(struct drm_device *dev,
+                                struct drm_file *file_priv);
 extern int i810_driver_load(struct drm_device *, unsigned long flags);
 extern void i810_driver_lastclose(struct drm_device *dev);
 extern void i810_driver_preclose(struct drm_device *dev,
                                 struct drm_file *file_priv);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
-                                              struct drm_file *file_priv);
 extern int i810_driver_device_is_agp(struct drm_device *dev);
 
 extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
index 0ae6a7c..808b255 100644 (file)
@@ -28,6 +28,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
          intel_dvo.o \
          intel_ringbuffer.o \
          intel_overlay.o \
+         intel_sprite.o \
          intel_opregion.o \
          dvo_ch7xxx.o \
          dvo_ch7017.o \
index 004b048..1180798 100644 (file)
@@ -1001,7 +1001,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
        return 0;
 }
 
-static int i915_drpc_info(struct seq_file *m, void *unused)
+static int ironlake_drpc_info(struct seq_file *m)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
@@ -1068,6 +1068,90 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
        return 0;
 }
 
+static int gen6_drpc_info(struct seq_file *m)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 rpmodectl1, gt_core_status, rcctl1;
+       int count = 0, ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       if (atomic_read(&dev_priv->forcewake_count)) {
+               seq_printf(m, "RC information inaccurate because somebody "
+                             "holds a forcewake reference\n");
+       } else {
+               /* NB: we cannot use forcewake, else we read the wrong values */
+               while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+                       udelay(10);
+               seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
+       }
+
+       gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
+       trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
+
+       rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+       rcctl1 = I915_READ(GEN6_RC_CONTROL);
+       mutex_unlock(&dev->struct_mutex);
+
+       seq_printf(m, "Video Turbo Mode: %s\n",
+                  yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+       seq_printf(m, "HW control enabled: %s\n",
+                  yesno(rpmodectl1 & GEN6_RP_ENABLE));
+       seq_printf(m, "SW control enabled: %s\n",
+                  yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+                         GEN6_RP_MEDIA_SW_MODE));
+       seq_printf(m, "RC1e Enabled: %s\n",
+                  yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+       seq_printf(m, "RC6 Enabled: %s\n",
+                  yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+       seq_printf(m, "Deep RC6 Enabled: %s\n",
+                  yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+       seq_printf(m, "Deepest RC6 Enabled: %s\n",
+                  yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+       seq_printf(m, "Current RC state: ");
+       switch (gt_core_status & GEN6_RCn_MASK) {
+       case GEN6_RC0:
+               if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+                       seq_printf(m, "Core Power Down\n");
+               else
+                       seq_printf(m, "on\n");
+               break;
+       case GEN6_RC3:
+               seq_printf(m, "RC3\n");
+               break;
+       case GEN6_RC6:
+               seq_printf(m, "RC6\n");
+               break;
+       case GEN6_RC7:
+               seq_printf(m, "RC7\n");
+               break;
+       default:
+               seq_printf(m, "Unknown\n");
+               break;
+       }
+
+       seq_printf(m, "Core Power Down: %s\n",
+                  yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+       return 0;
+}
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+
+       if (IS_GEN6(dev) || IS_GEN7(dev))
+               return gen6_drpc_info(m);
+       else
+               return ironlake_drpc_info(m);
+}
+
 static int i915_fbc_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
index a9ae374..5f4d589 100644 (file)
@@ -781,6 +781,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
+       case I915_PARAM_HAS_GEN7_SOL_RESET:
+               value = 1;
+               break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
@@ -2305,6 +2308,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
index a1103fc..8f71879 100644 (file)
@@ -810,6 +810,21 @@ static struct vm_operations_struct i915_gem_vm_ops = {
        .close = drm_gem_vm_close,
 };
 
+static const struct file_operations i915_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_gem_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = i915_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        /* Don't use MTRRs here; the Xserver or userspace app should
         * deal with them for Intel hardware.
@@ -843,21 +858,7 @@ static struct drm_driver driver = {
        .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy = i915_gem_dumb_destroy,
        .ioctls = i915_ioctls,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = drm_gem_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .read = drm_read,
-#ifdef CONFIG_COMPAT
-                .compat_ioctl = i915_compat_ioctl,
-#endif
-                .llseek = noop_llseek,
-       },
-
+       .fops = &i915_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
@@ -922,13 +923,6 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL and additional rights");
 
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
-       (((dev_priv)->info->gen >= 6) && \
-        ((reg) < 0x40000) &&            \
-        ((reg) != FORCEWAKE) &&         \
-        ((reg) != ECOBUS))
-
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
        u##x val = 0; \
index 554bef7..602bc80 100644 (file)
@@ -207,6 +207,8 @@ struct drm_i915_display_funcs {
        int (*get_display_clock_speed)(struct drm_device *dev);
        int (*get_fifo_size)(struct drm_device *dev, int plane);
        void (*update_wm)(struct drm_device *dev);
+       void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+                                uint32_t sprite_width, int pixel_size);
        int (*crtc_mode_set)(struct drm_crtc *crtc,
                             struct drm_display_mode *mode,
                             struct drm_display_mode *adjusted_mode,
@@ -337,6 +339,8 @@ typedef struct drm_i915_private {
        struct timer_list hangcheck_timer;
        int hangcheck_count;
        uint32_t last_acthd;
+       uint32_t last_acthd_bsd;
+       uint32_t last_acthd_blt;
        uint32_t last_instdone;
        uint32_t last_instdone1;
 
@@ -350,6 +354,7 @@ typedef struct drm_i915_private {
 
        /* overlay */
        struct intel_overlay *overlay;
+       bool sprite_scaling_enabled;
 
        /* LVDS info */
        int backlight_level;  /* restore backlight to this value */
@@ -1362,8 +1367,7 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
        (((dev_priv)->info->gen >= 6) && \
         ((reg) < 0x40000) &&            \
-        ((reg) != FORCEWAKE) &&         \
-        ((reg) != ECOBUS))
+        ((reg) != FORCEWAKE))
 
 #define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
index 8359dc7..e55badb 100644 (file)
@@ -2006,9 +2006,9 @@ i915_wait_request(struct intel_ring_buffer *ring,
                                           || atomic_read(&dev_priv->mm.wedged));
 
                        ring->irq_put(ring);
-               } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
-                                                     seqno) ||
-                                   atomic_read(&dev_priv->mm.wedged), 3000))
+               } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
+                                                            seqno) ||
+                                          atomic_read(&dev_priv->mm.wedged), 3000))
                        ret = -EBUSY;
                ring->waiting_seqno = 0;
 
@@ -3309,6 +3309,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 
                        if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
                                ret = -EIO;
+               } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
+                                                            seqno) ||
+                                   atomic_read(&dev_priv->mm.wedged), 3000)) {
+                       ret = -EBUSY;
                }
        }
 
index b9da890..65e1f00 100644 (file)
@@ -971,6 +971,31 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 }
 
 static int
+i915_reset_gen7_sol_offsets(struct drm_device *dev,
+                           struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret, i;
+
+       if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
+               return 0;
+
+       ret = intel_ring_begin(ring, 4 * 3);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < 4; i++) {
+               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+               intel_ring_emit(ring, 0);
+       }
+
+       intel_ring_advance(ring);
+
+       return 0;
+}
+
+static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
@@ -984,6 +1009,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct intel_ring_buffer *ring;
        u32 exec_start, exec_len;
        u32 seqno;
+       u32 mask;
        int ret, mode, i;
 
        if (!i915_gem_check_execbuffer(args)) {
@@ -1021,6 +1047,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
 
        mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+       mask = I915_EXEC_CONSTANTS_MASK;
        switch (mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
@@ -1034,18 +1061,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                            mode == I915_EXEC_CONSTANTS_REL_SURFACE)
                                return -EINVAL;
 
-                       ret = intel_ring_begin(ring, 4);
-                       if (ret)
-                               return ret;
-
-                       intel_ring_emit(ring, MI_NOOP);
-                       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-                       intel_ring_emit(ring, INSTPM);
-                       intel_ring_emit(ring,
-                                       I915_EXEC_CONSTANTS_MASK << 16 | mode);
-                       intel_ring_advance(ring);
-
-                       dev_priv->relative_constants_mode = mode;
+                       /* The HW changed the meaning of this bit on gen6 */
+                       if (INTEL_INFO(dev)->gen >= 6)
+                               mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
                }
                break;
        default:
@@ -1176,6 +1194,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                }
        }
 
+       if (ring == &dev_priv->ring[RCS] &&
+           mode != dev_priv->relative_constants_mode) {
+               ret = intel_ring_begin(ring, 4);
+               if (ret)
+                       goto err;
+
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit(ring, INSTPM);
+               intel_ring_emit(ring, mask << 16 | mode);
+               intel_ring_advance(ring);
+
+               dev_priv->relative_constants_mode = mode;
+       }
+
+       if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+               ret = i915_reset_gen7_sol_offsets(dev, ring);
+               if (ret)
+                       goto err;
+       }
+
        trace_i915_gem_ring_dispatch(ring, seqno);
 
        exec_start = batch_obj->gtt_offset + args->batch_start_offset;
index b40004b..5d433fc 100644 (file)
@@ -1205,7 +1205,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
        } else {
                int dspaddr = DSPADDR(intel_crtc->plane);
                stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
-                                                       crtc->y * crtc->fb->pitch +
+                                                       crtc->y * crtc->fb->pitches[0] +
                                                        crtc->x * crtc->fb->bits_per_pixel/8);
        }
 
@@ -1649,13 +1649,6 @@ static bool kick_ring(struct intel_ring_buffer *ring)
                I915_WRITE_CTL(ring, tmp);
                return true;
        }
-       if (IS_GEN6(dev) &&
-           (tmp & RING_WAIT_SEMAPHORE)) {
-               DRM_ERROR("Kicking stuck semaphore on %s\n",
-                         ring->name);
-               I915_WRITE_CTL(ring, tmp);
-               return true;
-       }
        return false;
 }
 
@@ -1669,7 +1662,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 {
        struct drm_device *dev = (struct drm_device *)data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t acthd, instdone, instdone1;
+       uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
        bool err = false;
 
        if (!i915_enable_hangcheck)
@@ -1686,16 +1679,21 @@ void i915_hangcheck_elapsed(unsigned long data)
        }
 
        if (INTEL_INFO(dev)->gen < 4) {
-               acthd = I915_READ(ACTHD);
                instdone = I915_READ(INSTDONE);
                instdone1 = 0;
        } else {
-               acthd = I915_READ(ACTHD_I965);
                instdone = I915_READ(INSTDONE_I965);
                instdone1 = I915_READ(INSTDONE1);
        }
+       acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
+       acthd_bsd = HAS_BSD(dev) ?
+               intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
+       acthd_blt = HAS_BLT(dev) ?
+               intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
 
        if (dev_priv->last_acthd == acthd &&
+           dev_priv->last_acthd_bsd == acthd_bsd &&
+           dev_priv->last_acthd_blt == acthd_blt &&
            dev_priv->last_instdone == instdone &&
            dev_priv->last_instdone1 == instdone1) {
                if (dev_priv->hangcheck_count++ > 1) {
@@ -1727,6 +1725,8 @@ void i915_hangcheck_elapsed(unsigned long data)
                dev_priv->hangcheck_count = 0;
 
                dev_priv->last_acthd = acthd;
+               dev_priv->last_acthd_bsd = acthd_bsd;
+               dev_priv->last_acthd_blt = acthd_blt;
                dev_priv->last_instdone = instdone;
                dev_priv->last_instdone1 = instdone1;
        }
index a26d5b0..c3afb78 100644 (file)
 #define   INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
                                        will not assert AGPBUSY# and will only
                                        be delivered when out of C3. */
+#define   INSTPM_FORCE_ORDERING                                (1<<7) /* GEN6+ */
 #define ACTHD          0x020c8
 #define FW_BLC         0x020d8
 #define FW_BLC2                0x020dc
 #define   PIPECONF_PROGRESSIVE (0 << 21)
 #define   PIPECONF_INTERLACE_W_FIELD_INDICATION        (6 << 21)
 #define   PIPECONF_INTERLACE_FIELD_0_ONLY              (7 << 21)
+#define   PIPECONF_INTERLACE_MASK      (7 << 21)
 #define   PIPECONF_CXSR_DOWNCLOCK      (1<<16)
 #define   PIPECONF_BPP_MASK    (0x000000e0)
 #define   PIPECONF_BPP_8       (0<<5)
 #define WM3_LP_ILK             0x45110
 #define  WM3_LP_EN             (1<<31)
 #define WM1S_LP_ILK            0x45120
+#define WM2S_LP_IVB            0x45124
+#define WM3S_LP_IVB            0x45128
 #define  WM1S_LP_EN            (1<<31)
 
 /* Memory latency timer register */
 #define _DSPBSURF              0x7119C
 #define _DSPBTILEOFF           0x711A4
 
+/* Sprite A control */
+#define _DVSACNTR              0x72180
+#define   DVS_ENABLE           (1<<31)
+#define   DVS_GAMMA_ENABLE     (1<<30)
+#define   DVS_PIXFORMAT_MASK   (3<<25)
+#define   DVS_FORMAT_YUV422    (0<<25)
+#define   DVS_FORMAT_RGBX101010        (1<<25)
+#define   DVS_FORMAT_RGBX888   (2<<25)
+#define   DVS_FORMAT_RGBX161616        (3<<25)
+#define   DVS_SOURCE_KEY       (1<<22)
+#define   DVS_RGB_ORDER_RGBX   (1<<20)
+#define   DVS_YUV_BYTE_ORDER_MASK (3<<16)
+#define   DVS_YUV_ORDER_YUYV   (0<<16)
+#define   DVS_YUV_ORDER_UYVY   (1<<16)
+#define   DVS_YUV_ORDER_YVYU   (2<<16)
+#define   DVS_YUV_ORDER_VYUY   (3<<16)
+#define   DVS_DEST_KEY         (1<<2)
+#define   DVS_TRICKLE_FEED_DISABLE (1<<14)
+#define   DVS_TILED            (1<<10)
+#define _DVSALINOFF            0x72184
+#define _DVSASTRIDE            0x72188
+#define _DVSAPOS               0x7218c
+#define _DVSASIZE              0x72190
+#define _DVSAKEYVAL            0x72194
+#define _DVSAKEYMSK            0x72198
+#define _DVSASURF              0x7219c
+#define _DVSAKEYMAXVAL         0x721a0
+#define _DVSATILEOFF           0x721a4
+#define _DVSASURFLIVE          0x721ac
+#define _DVSASCALE             0x72204
+#define   DVS_SCALE_ENABLE     (1<<31)
+#define   DVS_FILTER_MASK      (3<<29)
+#define   DVS_FILTER_MEDIUM    (0<<29)
+#define   DVS_FILTER_ENHANCING (1<<29)
+#define   DVS_FILTER_SOFTENING (2<<29)
+#define   DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define   DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _DVSAGAMC              0x72300
+
+#define _DVSBCNTR              0x73180
+#define _DVSBLINOFF            0x73184
+#define _DVSBSTRIDE            0x73188
+#define _DVSBPOS               0x7318c
+#define _DVSBSIZE              0x73190
+#define _DVSBKEYVAL            0x73194
+#define _DVSBKEYMSK            0x73198
+#define _DVSBSURF              0x7319c
+#define _DVSBKEYMAXVAL         0x731a0
+#define _DVSBTILEOFF           0x731a4
+#define _DVSBSURFLIVE          0x731ac
+#define _DVSBSCALE             0x73204
+#define _DVSBGAMC              0x73300
+
+#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+
+#define _SPRA_CTL              0x70280
+#define   SPRITE_ENABLE                        (1<<31)
+#define   SPRITE_GAMMA_ENABLE          (1<<30)
+#define   SPRITE_PIXFORMAT_MASK                (7<<25)
+#define   SPRITE_FORMAT_YUV422         (0<<25)
+#define   SPRITE_FORMAT_RGBX101010     (1<<25)
+#define   SPRITE_FORMAT_RGBX888                (2<<25)
+#define   SPRITE_FORMAT_RGBX161616     (3<<25)
+#define   SPRITE_FORMAT_YUV444         (4<<25)
+#define   SPRITE_FORMAT_XR_BGR101010   (5<<25) /* Extended range */
+#define   SPRITE_CSC_ENABLE            (1<<24)
+#define   SPRITE_SOURCE_KEY            (1<<22)
+#define   SPRITE_RGB_ORDER_RGBX                (1<<20) /* only for 888 and 161616 */
+#define   SPRITE_YUV_TO_RGB_CSC_DISABLE        (1<<19)
+#define   SPRITE_YUV_CSC_FORMAT_BT709  (1<<18) /* 0 is BT601 */
+#define   SPRITE_YUV_BYTE_ORDER_MASK   (3<<16)
+#define   SPRITE_YUV_ORDER_YUYV                (0<<16)
+#define   SPRITE_YUV_ORDER_UYVY                (1<<16)
+#define   SPRITE_YUV_ORDER_YVYU                (2<<16)
+#define   SPRITE_YUV_ORDER_VYUY                (3<<16)
+#define   SPRITE_TRICKLE_FEED_DISABLE  (1<<14)
+#define   SPRITE_INT_GAMMA_ENABLE      (1<<13)
+#define   SPRITE_TILED                 (1<<10)
+#define   SPRITE_DEST_KEY              (1<<2)
+#define _SPRA_LINOFF           0x70284
+#define _SPRA_STRIDE           0x70288
+#define _SPRA_POS              0x7028c
+#define _SPRA_SIZE             0x70290
+#define _SPRA_KEYVAL           0x70294
+#define _SPRA_KEYMSK           0x70298
+#define _SPRA_SURF             0x7029c
+#define _SPRA_KEYMAX           0x702a0
+#define _SPRA_TILEOFF          0x702a4
+#define _SPRA_SCALE            0x70304
+#define   SPRITE_SCALE_ENABLE  (1<<31)
+#define   SPRITE_FILTER_MASK   (3<<29)
+#define   SPRITE_FILTER_MEDIUM (0<<29)
+#define   SPRITE_FILTER_ENHANCING      (1<<29)
+#define   SPRITE_FILTER_SOFTENING      (2<<29)
+#define   SPRITE_VERTICAL_OFFSET_HALF  (1<<28) /* must be enabled below */
+#define   SPRITE_VERTICAL_OFFSET_ENABLE        (1<<27)
+#define _SPRA_GAMC             0x70400
+
+#define _SPRB_CTL              0x71280
+#define _SPRB_LINOFF           0x71284
+#define _SPRB_STRIDE           0x71288
+#define _SPRB_POS              0x7128c
+#define _SPRB_SIZE             0x71290
+#define _SPRB_KEYVAL           0x71294
+#define _SPRB_KEYMSK           0x71298
+#define _SPRB_SURF             0x7129c
+#define _SPRB_KEYMAX           0x712a0
+#define _SPRB_TILEOFF          0x712a4
+#define _SPRB_SCALE            0x71304
+#define _SPRB_GAMC             0x71400
+
+#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+
 /* VBIOS regs */
 #define VGACNTRL               0x71400
 # define VGA_DISP_DISABLE                      (1 << 31)
 #define   ILK_DPFC_DIS1                (1<<8)
 #define   ILK_DPFC_DIS2                (1<<9)
 
+#define IVB_CHICKEN3   0x4200c
+# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE     (1 << 5)
+# define CHICKEN3_DGMG_DONE_FIX_DISABLE                (1 << 2)
+
 #define DISP_ARB_CTL   0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING   (1<<13)
 #define  DISP_FBC_WM_DIS               (1<<15)
 #define   GEN6_CAGF_MASK                       (0x7f << GEN6_CAGF_SHIFT)
 #define GEN6_RP_CONTROL                                0xA024
 #define   GEN6_RP_MEDIA_TURBO                  (1<<11)
-#define   GEN6_RP_USE_NORMAL_FREQ              (1<<9)
+#define   GEN6_RP_MEDIA_MODE_MASK              (3<<9)
+#define   GEN6_RP_MEDIA_HW_TURBO_MODE          (3<<9)
+#define   GEN6_RP_MEDIA_HW_NORMAL_MODE         (2<<9)
+#define   GEN6_RP_MEDIA_HW_MODE                        (1<<9)
+#define   GEN6_RP_MEDIA_SW_MODE                        (0<<9)
 #define   GEN6_RP_MEDIA_IS_GFX                 (1<<8)
 #define   GEN6_RP_ENABLE                       (1<<7)
 #define   GEN6_RP_UP_IDLE_MIN                  (0x1<<3)
 #define GEN6_PCODE_DATA                                0x138128
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT       8
 
+#define GEN6_GT_CORE_STATUS            0x138060
+#define   GEN6_CORE_CPD_STATE_MASK     (7<<4)
+#define   GEN6_RCn_MASK                        7
+#define   GEN6_RC0                     0
+#define   GEN6_RC3                     2
+#define   GEN6_RC6                     3
+#define   GEN6_RC7                     4
+
 #define G4X_AUD_VID_DID                        0x62020
 #define INTEL_AUDIO_DEVCL              0x808629FB
 #define INTEL_AUDIO_DEVBLC             0x80862801
 #define G4X_ELD_ACK                    (1 << 4)
 #define G4X_HDMIW_HDMIEDID             0x6210C
 
-#define GEN5_HDMIW_HDMIEDID_A          0xE2050
-#define GEN5_AUD_CNTL_ST_A             0xE20B4
-#define GEN5_ELD_BUFFER_SIZE           (0x1f << 10)
-#define GEN5_ELD_ADDRESS               (0x1f << 5)
-#define GEN5_ELD_ACK                   (1 << 4)
-#define GEN5_AUD_CNTL_ST2              0xE20C0
-#define GEN5_ELD_VALIDB                        (1 << 0)
-#define GEN5_CP_READYB                 (1 << 1)
-
-#define GEN7_HDMIW_HDMIEDID_A          0xE5050
-#define GEN7_AUD_CNTRL_ST_A            0xE50B4
-#define GEN7_AUD_CNTRL_ST2             0xE50C0
+#define IBX_HDMIW_HDMIEDID_A           0xE2050
+#define IBX_AUD_CNTL_ST_A              0xE20B4
+#define IBX_ELD_BUFFER_SIZE            (0x1f << 10)
+#define IBX_ELD_ADDRESS                        (0x1f << 5)
+#define IBX_ELD_ACK                    (1 << 4)
+#define IBX_AUD_CNTL_ST2               0xE20C0
+#define IBX_ELD_VALIDB                 (1 << 0)
+#define IBX_CP_READYB                  (1 << 1)
+
+#define CPT_HDMIW_HDMIEDID_A           0xE5050
+#define CPT_AUD_CNTL_ST_A              0xE50B4
+#define CPT_AUD_CNTRL_ST2              0xE50C0
+
+/* These are the 4 32-bit write offset registers for each stream
+ * output buffer.  It determines the offset from the
+ * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
+ */
+#define GEN7_SO_WRITE_OFFSET(n)                (0x5280 + (n) * 4)
 
 #endif /* _I915_REG_H_ */
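
The DVS*/SPR*(pipe) macros above follow the driver's usual pattern of deriving a per-pipe register address from the pipe A and pipe B instances. A minimal sketch of how they and GEN7_SO_WRITE_OFFSET(n) are expected to expand, assuming the pre-existing _PIPE() helper in i915_reg.h (not shown in this hunk) interpolates between the two addresses:

    /* Sketch only: _PIPE_DEMO mirrors what _PIPE(pipe, a, b) is assumed to do. */
    #define _PIPE_DEMO(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

    /* _PIPE_DEMO(0, _DVSASURF, _DVSBSURF) == 0x7219c   (pipe A sprite surface) */
    /* _PIPE_DEMO(1, _DVSASURF, _DVSBSURF) == 0x7319c   (pipe B sprite surface) */
    /* GEN7_SO_WRITE_OFFSET(2)             == 0x5280 + 2 * 4 == 0x5288          */
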
index daa5743..2a3f707 100644 (file)
@@ -915,8 +915,8 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
             pipe_name(pipe));
 }
 
-static void assert_pipe(struct drm_i915_private *dev_priv,
-                       enum pipe pipe, bool state)
+void assert_pipe(struct drm_i915_private *dev_priv,
+                enum pipe pipe, bool state)
 {
        int reg;
        u32 val;
@@ -929,8 +929,6 @@ static void assert_pipe(struct drm_i915_private *dev_priv,
             "pipe %c assertion failure (expected %s, current %s)\n",
             pipe_name(pipe), state_string(state), state_string(cur_state));
 }
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
 
 static void assert_plane_enabled(struct drm_i915_private *dev_priv,
                                 enum plane plane)
@@ -1206,7 +1204,8 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
                                  enum pipe pipe)
 {
        int reg;
-       u32 val;
+       u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
+               pll_sel = TRANSC_DPLL_ENABLE;
 
        if (pipe > 1)
                return;
@@ -1217,6 +1216,15 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
        /* Make sure transcoder isn't still depending on us */
        assert_transcoder_disabled(dev_priv, pipe);
 
+       if (pipe == 0)
+               pll_sel |= TRANSC_DPLLA_SEL;
+       else if (pipe == 1)
+               pll_sel |= TRANSC_DPLLB_SEL;
+
+
+       if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
+               return;
+
        reg = PCH_DPLL(pipe);
        val = I915_READ(reg);
        val &= ~DPLL_VCO_ENABLE;
@@ -1511,8 +1519,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        u32 fbc_ctl, fbc_ctl2;
 
        cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
-       if (fb->pitch < cfb_pitch)
-               cfb_pitch = fb->pitch;
+       if (fb->pitches[0] < cfb_pitch)
+               cfb_pitch = fb->pitches[0];
 
        /* FBC_CTL wants 64B units */
        cfb_pitch = (cfb_pitch / 64) - 1;
@@ -2073,11 +2081,11 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        I915_WRITE(reg, dspcntr);
 
        Start = obj->gtt_offset;
-       Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+       Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
 
        DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
-                     Start, Offset, x, y, fb->pitch);
-       I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+                     Start, Offset, x, y, fb->pitches[0]);
+       I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_INFO(dev)->gen >= 4) {
                I915_WRITE(DSPSURF(plane), Start);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -2154,11 +2162,11 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
        I915_WRITE(reg, dspcntr);
 
        Start = obj->gtt_offset;
-       Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+       Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
 
        DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
-                     Start, Offset, x, y, fb->pitch);
-       I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+                     Start, Offset, x, y, fb->pitches[0]);
+       I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_WRITE(DSPSURF(plane), Start);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPADDR(plane), Offset);
@@ -4509,7 +4517,7 @@ static void ironlake_update_wm(struct drm_device *dev)
         */
 }
 
-static void sandybridge_update_wm(struct drm_device *dev)
+void sandybridge_update_wm(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
@@ -4569,7 +4577,8 @@ static void sandybridge_update_wm(struct drm_device *dev)
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
 
-       if (!single_plane_enabled(enabled))
+       if (!single_plane_enabled(enabled) ||
+           dev_priv->sprite_scaling_enabled)
                return;
        enabled = ffs(enabled) - 1;
 
@@ -4619,6 +4628,149 @@ static void sandybridge_update_wm(struct drm_device *dev)
                   cursor_wm);
 }
 
+static bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+                             uint32_t sprite_width, int pixel_size,
+                             const struct intel_watermark_params *display,
+                             int display_latency_ns, int *sprite_wm)
+{
+       struct drm_crtc *crtc;
+       int clock;
+       int entries, tlb_miss;
+
+       crtc = intel_get_crtc_for_plane(dev, plane);
+       if (crtc->fb == NULL || !crtc->enabled) {
+               *sprite_wm = display->guard_size;
+               return false;
+       }
+
+       clock = crtc->mode.clock;
+
+       /* Use the small buffer method to calculate the sprite watermark */
+       entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+       tlb_miss = display->fifo_size*display->cacheline_size -
+               sprite_width * 8;
+       if (tlb_miss > 0)
+               entries += tlb_miss;
+       entries = DIV_ROUND_UP(entries, display->cacheline_size);
+       *sprite_wm = entries + display->guard_size;
+       if (*sprite_wm > (int)display->max_wm)
+               *sprite_wm = display->max_wm;
+
+       return true;
+}
+
+static bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+                               uint32_t sprite_width, int pixel_size,
+                               const struct intel_watermark_params *display,
+                               int latency_ns, int *sprite_wm)
+{
+       struct drm_crtc *crtc;
+       unsigned long line_time_us;
+       int clock;
+       int line_count, line_size;
+       int small, large;
+       int entries;
+
+       if (!latency_ns) {
+               *sprite_wm = 0;
+               return false;
+       }
+
+       crtc = intel_get_crtc_for_plane(dev, plane);
+       clock = crtc->mode.clock;
+
+       line_time_us = (sprite_width * 1000) / clock;
+       line_count = (latency_ns / line_time_us + 1000) / 1000;
+       line_size = sprite_width * pixel_size;
+
+       /* Use the minimum of the small and large buffer method for primary */
+       small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+       large = line_count * line_size;
+
+       entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+       *sprite_wm = entries + display->guard_size;
+
+       return *sprite_wm > 0x3ff ? false : true;
+}
+
+static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+                                        uint32_t sprite_width, int pixel_size)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
+       int sprite_wm, reg;
+       int ret;
+
+       switch (pipe) {
+       case 0:
+               reg = WM0_PIPEA_ILK;
+               break;
+       case 1:
+               reg = WM0_PIPEB_ILK;
+               break;
+       case 2:
+               reg = WM0_PIPEC_IVB;
+               break;
+       default:
+               return; /* bad pipe */
+       }
+
+       ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+                                           &sandybridge_display_wm_info,
+                                           latency, &sprite_wm);
+       if (!ret) {
+               DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+                             pipe);
+               return;
+       }
+
+       I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+       DRM_DEBUG_KMS("sprite watermarks for pipe %d - %d\n", pipe, sprite_wm);
+
+
+       ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+                                             pixel_size,
+                                             &sandybridge_display_srwm_info,
+                                             SNB_READ_WM1_LATENCY() * 500,
+                                             &sprite_wm);
+       if (!ret) {
+               DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+                             pipe);
+               return;
+       }
+       I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+       /* Only IVB has two more LP watermarks for sprite */
+       if (!IS_IVYBRIDGE(dev))
+               return;
+
+       ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+                                             pixel_size,
+                                             &sandybridge_display_srwm_info,
+                                             SNB_READ_WM2_LATENCY() * 500,
+                                             &sprite_wm);
+       if (!ret) {
+               DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+                             pipe);
+               return;
+       }
+       I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+       ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+                                             pixel_size,
+                                             &sandybridge_display_srwm_info,
+                                             SNB_READ_WM3_LATENCY() * 500,
+                                             &sprite_wm);
+       if (!ret) {
+               DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+                             pipe);
+               return;
+       }
+       I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
  *
@@ -4659,6 +4811,16 @@ static void intel_update_watermarks(struct drm_device *dev)
                dev_priv->display.update_wm(dev);
 }
 
+void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+                                   uint32_t sprite_width, int pixel_size)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->display.update_sprite_wm)
+               dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+                                                  pixel_size);
+}
+
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 {
        if (i915_panel_use_ssc >= 0)
@@ -5155,7 +5317,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                adjusted_mode->crtc_vsync_end -= 1;
                adjusted_mode->crtc_vsync_start -= 1;
        } else
-               pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+               pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
 
        I915_WRITE(HTOTAL(pipe),
                   (adjusted_mode->crtc_hdisplay - 1) |
@@ -5822,14 +5984,45 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
        ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
                                              x, y, old_fb);
-
        drm_vblank_post_modeset(dev, pipe);
 
-       intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+       if (ret)
+               intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+       else
+               intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
 
        return ret;
 }
 
+static bool intel_eld_uptodate(struct drm_connector *connector,
+                              int reg_eldv, uint32_t bits_eldv,
+                              int reg_elda, uint32_t bits_elda,
+                              int reg_edid)
+{
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       uint8_t *eld = connector->eld;
+       uint32_t i;
+
+       i = I915_READ(reg_eldv);
+       i &= bits_eldv;
+
+       if (!eld[0])
+               return !i;
+
+       if (!i)
+               return false;
+
+       i = I915_READ(reg_elda);
+       i &= ~bits_elda;
+       I915_WRITE(reg_elda, i);
+
+       for (i = 0; i < eld[2]; i++)
+               if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+                       return false;
+
+       return true;
+}
+
 static void g4x_write_eld(struct drm_connector *connector,
                          struct drm_crtc *crtc)
 {
@@ -5846,6 +6039,12 @@ static void g4x_write_eld(struct drm_connector *connector,
        else
                eldv = G4X_ELDV_DEVCTG;
 
+       if (intel_eld_uptodate(connector,
+                              G4X_AUD_CNTL_ST, eldv,
+                              G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
+                              G4X_HDMIW_HDMIEDID))
+               return;
+
        i = I915_READ(G4X_AUD_CNTL_ST);
        i &= ~(eldv | G4X_ELD_ADDR);
        len = (i >> 9) & 0x1f;          /* ELD buffer size */
@@ -5876,14 +6075,14 @@ static void ironlake_write_eld(struct drm_connector *connector,
        int aud_cntl_st;
        int aud_cntrl_st2;
 
-       if (IS_IVYBRIDGE(connector->dev)) {
-               hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A;
-               aud_cntl_st = GEN7_AUD_CNTRL_ST_A;
-               aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2;
+       if (HAS_PCH_IBX(connector->dev)) {
+               hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
+               aud_cntl_st = IBX_AUD_CNTL_ST_A;
+               aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
        } else {
-               hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A;
-               aud_cntl_st = GEN5_AUD_CNTL_ST_A;
-               aud_cntrl_st2 = GEN5_AUD_CNTL_ST2;
+               hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
+               aud_cntl_st = CPT_AUD_CNTL_ST_A;
+               aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
        }
 
        i = to_intel_crtc(crtc)->pipe;
@@ -5897,14 +6096,25 @@ static void ironlake_write_eld(struct drm_connector *connector,
        if (!i) {
                DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
                /* operate blindly on all ports */
-               eldv = GEN5_ELD_VALIDB;
-               eldv |= GEN5_ELD_VALIDB << 4;
-               eldv |= GEN5_ELD_VALIDB << 8;
+               eldv = IBX_ELD_VALIDB;
+               eldv |= IBX_ELD_VALIDB << 4;
+               eldv |= IBX_ELD_VALIDB << 8;
        } else {
                DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
-               eldv = GEN5_ELD_VALIDB << ((i - 1) * 4);
+               eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
+       }
+
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+               DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+               eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
        }
 
+       if (intel_eld_uptodate(connector,
+                              aud_cntrl_st2, eldv,
+                              aud_cntl_st, IBX_ELD_ADDRESS,
+                              hdmiw_hdmiedid))
+               return;
+
        i = I915_READ(aud_cntrl_st2);
        i &= ~eldv;
        I915_WRITE(aud_cntrl_st2, i);
@@ -5912,13 +6122,8 @@ static void ironlake_write_eld(struct drm_connector *connector,
        if (!eld[0])
                return;
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
-               DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
-               eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
-       }
-
        i = I915_READ(aud_cntl_st);
-       i &= ~GEN5_ELD_ADDRESS;
+       i &= ~IBX_ELD_ADDRESS;
        I915_WRITE(aud_cntl_st, i);
 
        len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
@@ -6298,7 +6503,7 @@ static struct drm_display_mode load_detect_mode = {
 
 static struct drm_framebuffer *
 intel_framebuffer_create(struct drm_device *dev,
-                        struct drm_mode_fb_cmd *mode_cmd,
+                        struct drm_mode_fb_cmd2 *mode_cmd,
                         struct drm_i915_gem_object *obj)
 {
        struct intel_framebuffer *intel_fb;
@@ -6340,7 +6545,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
                                  int depth, int bpp)
 {
        struct drm_i915_gem_object *obj;
-       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd;
 
        obj = i915_gem_alloc_object(dev,
                                    intel_framebuffer_size_for_mode(mode, bpp));
@@ -6349,9 +6554,9 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
 
        mode_cmd.width = mode->hdisplay;
        mode_cmd.height = mode->vdisplay;
-       mode_cmd.depth = depth;
-       mode_cmd.bpp = bpp;
-       mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
+       mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
+                                                               bpp);
+       mode_cmd.pixel_format = 0;
 
        return intel_framebuffer_create(dev, &mode_cmd, obj);
 }
@@ -6372,11 +6577,11 @@ mode_fits_in_fbdev(struct drm_device *dev,
                return NULL;
 
        fb = &dev_priv->fbdev->ifb.base;
-       if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
-                                                         fb->bits_per_pixel))
+       if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
+                                                              fb->bits_per_pixel))
                return NULL;
 
-       if (obj->base.size < mode->vdisplay * fb->pitch)
+       if (obj->base.size < mode->vdisplay * fb->pitches[0])
                return NULL;
 
        return fb;
@@ -7009,7 +7214,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
                goto out;
 
        /* Offset into the new buffer for cases of shared fbs between CRTCs */
-       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+       offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
 
        ret = BEGIN_LP_RING(6);
        if (ret)
@@ -7026,7 +7231,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
        OUT_RING(MI_NOOP);
        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       OUT_RING(fb->pitch);
+       OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset + offset);
        OUT_RING(MI_NOOP);
        ADVANCE_LP_RING();
@@ -7050,7 +7255,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
                goto out;
 
        /* Offset into the new buffer for cases of shared fbs between CRTCs */
-       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+       offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
 
        ret = BEGIN_LP_RING(6);
        if (ret)
@@ -7064,7 +7269,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
        OUT_RING(MI_NOOP);
        OUT_RING(MI_DISPLAY_FLIP_I915 |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       OUT_RING(fb->pitch);
+       OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset + offset);
        OUT_RING(MI_NOOP);
 
@@ -7097,7 +7302,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
         */
        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       OUT_RING(fb->pitch);
+       OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset | obj->tiling_mode);
 
        /* XXX Enabling the panel-fitter across page-flip is so far
@@ -7132,7 +7337,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 
        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-       OUT_RING(fb->pitch | obj->tiling_mode);
+       OUT_RING(fb->pitches[0] | obj->tiling_mode);
        OUT_RING(obj->gtt_offset);
 
        pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
@@ -7168,7 +7373,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
                goto out;
 
        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
-       intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+       intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, (obj->gtt_offset));
        intel_ring_emit(ring, (MI_NOOP));
        intel_ring_advance(ring);
@@ -7594,7 +7799,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
 
 int intel_framebuffer_init(struct drm_device *dev,
                           struct intel_framebuffer *intel_fb,
-                          struct drm_mode_fb_cmd *mode_cmd,
+                          struct drm_mode_fb_cmd2 *mode_cmd,
                           struct drm_i915_gem_object *obj)
 {
        int ret;
@@ -7602,21 +7807,25 @@ int intel_framebuffer_init(struct drm_device *dev,
        if (obj->tiling_mode == I915_TILING_Y)
                return -EINVAL;
 
-       if (mode_cmd->pitch & 63)
+       if (mode_cmd->pitches[0] & 63)
                return -EINVAL;
 
-       switch (mode_cmd->bpp) {
-       case 8:
-       case 16:
-               /* Only pre-ILK can handle 5:5:5 */
-               if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
-                       return -EINVAL;
+       switch (mode_cmd->pixel_format) {
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               /* RGB formats are common across chipsets */
                break;
-
-       case 24:
-       case 32:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_VYUY:
                break;
        default:
+               DRM_ERROR("unsupported pixel format\n");
                return -EINVAL;
        }
 
@@ -7634,11 +7843,12 @@ int intel_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 intel_user_framebuffer_create(struct drm_device *dev,
                              struct drm_file *filp,
-                             struct drm_mode_fb_cmd *mode_cmd)
+                             struct drm_mode_fb_cmd2 *mode_cmd)
 {
        struct drm_i915_gem_object *obj;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
+       obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
+                                               mode_cmd->handles[0]));
        if (&obj->base == NULL)
                return ERR_PTR(-ENOENT);
 
@@ -7995,7 +8205,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
        I915_WRITE(GEN6_RP_CONTROL,
                   GEN6_RP_MEDIA_TURBO |
-                  GEN6_RP_USE_NORMAL_FREQ |
+                  GEN6_RP_MEDIA_HW_MODE |
                   GEN6_RP_MEDIA_IS_GFX |
                   GEN6_RP_ENABLE |
                   GEN6_RP_UP_BUSY_AVG |
@@ -8250,6 +8460,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 
        I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 
+       I915_WRITE(IVB_CHICKEN3,
+                  CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+                  CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
        for_each_pipe(pipe) {
                I915_WRITE(DSPCNTR(pipe),
                           I915_READ(DSPCNTR(pipe)) |
@@ -8543,9 +8757,15 @@ static void intel_init_display(struct drm_device *dev)
                if (IS_IVYBRIDGE(dev)) {
                        u32     ecobus;
 
+                       /* A small trick here - if the BIOS hasn't configured MT forcewake,
+                        * and if the device is in RC6, then force_wake_mt_get will not wake
+                        * the device and the ECOBUS read will return zero, which will be
+                        * (correctly) interpreted by the test below as MT forcewake being
+                        * disabled.
+                        */
                        mutex_lock(&dev->struct_mutex);
                        __gen6_gt_force_wake_mt_get(dev_priv);
-                       ecobus = I915_READ(ECOBUS);
+                       ecobus = I915_READ_NOTRACE(ECOBUS);
                        __gen6_gt_force_wake_mt_put(dev_priv);
                        mutex_unlock(&dev->struct_mutex);
 
@@ -8577,6 +8797,7 @@ static void intel_init_display(struct drm_device *dev)
                } else if (IS_GEN6(dev)) {
                        if (SNB_READ_WM0_LATENCY()) {
                                dev_priv->display.update_wm = sandybridge_update_wm;
+                               dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                        } else {
                                DRM_DEBUG_KMS("Failed to read display plane latency. "
                                              "Disable CxSR\n");
@@ -8590,6 +8811,7 @@ static void intel_init_display(struct drm_device *dev)
                        dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
                        if (SNB_READ_WM0_LATENCY()) {
                                dev_priv->display.update_wm = sandybridge_update_wm;
+                               dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                        } else {
                                DRM_DEBUG_KMS("Failed to read display plane latency. "
                                              "Disable CxSR\n");
@@ -8773,7 +8995,7 @@ static void i915_disable_vga(struct drm_device *dev)
 void intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
+       int i, ret;
 
        drm_mode_config_init(dev);
 
@@ -8803,6 +9025,12 @@ void intel_modeset_init(struct drm_device *dev)
 
        for (i = 0; i < dev_priv->num_pipe; i++) {
                intel_crtc_init(dev, i);
+               if (HAS_PCH_SPLIT(dev)) {
+                       ret = intel_plane_init(dev, i);
+                       if (ret)
+                               DRM_ERROR("plane %d init failed: %d\n",
+                                         i, ret);
+               }
        }
 
        /* Just disable it once at startup */
index 92b041b..db3b461 100644 (file)
@@ -1926,6 +1926,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                        intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
        }
 
+       DP &= ~DP_AUDIO_OUTPUT_ENABLE;
        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);
        msleep(intel_dp->panel_power_down_delay);
index a1b4343..1348705 100644 (file)
@@ -26,6 +26,7 @@
 #define __INTEL_DRV_H__
 
 #include <linux/i2c.h>
+#include "i915_drm.h"
 #include "i915_drv.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
@@ -39,7 +40,7 @@
                        ret__ = -ETIMEDOUT;                             \
                        break;                                          \
                }                                                       \
-               if (W && !(in_atomic() || in_dbg_master())) msleep(W);  \
+               if (W && drm_can_sleep()) msleep(W);    \
        }                                                               \
        ret__;                                                          \
 })
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
 #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
 
-#define MSLEEP(x) do { \
-       if (in_dbg_master()) \
-               mdelay(x); \
-       else \
-               msleep(x); \
-} while (0)
-
 #define KHz(x) (1000*x)
 #define MHz(x) KHz(1000*x)
 
@@ -177,10 +171,32 @@ struct intel_crtc {
        bool use_pll_a;
 };
 
+struct intel_plane {
+       struct drm_plane base;
+       enum pipe pipe;
+       struct drm_i915_gem_object *obj;
+       bool primary_disabled;
+       int max_downscale;
+       u32 lut_r[1024], lut_g[1024], lut_b[1024];
+       void (*update_plane)(struct drm_plane *plane,
+                            struct drm_framebuffer *fb,
+                            struct drm_i915_gem_object *obj,
+                            int crtc_x, int crtc_y,
+                            unsigned int crtc_w, unsigned int crtc_h,
+                            uint32_t x, uint32_t y,
+                            uint32_t src_w, uint32_t src_h);
+       void (*disable_plane)(struct drm_plane *plane);
+       int (*update_colorkey)(struct drm_plane *plane,
+                              struct drm_intel_sprite_colorkey *key);
+       void (*get_colorkey)(struct drm_plane *plane,
+                            struct drm_intel_sprite_colorkey *key);
+};
+
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
 #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
 
 #define DIP_HEADER_SIZE        5
 
@@ -290,6 +306,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 extern bool intel_dpd_is_edp(struct drm_device *dev);
 extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
 extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
 
 /* intel_panel.c */
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -360,7 +377,7 @@ extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 
 extern int intel_framebuffer_init(struct drm_device *dev,
                                  struct intel_framebuffer *ifb,
-                                 struct drm_mode_fb_cmd *mode_cmd,
+                                 struct drm_mode_fb_cmd2 *mode_cmd,
                                  struct drm_i915_gem_object *obj);
 extern int intel_fbdev_init(struct drm_device *dev);
 extern void intel_fbdev_fini(struct drm_device *dev);
@@ -380,9 +397,25 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
 extern void intel_fb_output_poll_changed(struct drm_device *dev);
 extern void intel_fb_restore_mode(struct drm_device *dev);
 
+extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+                       bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void intel_write_eld(struct drm_encoder *encoder,
                            struct drm_display_mode *mode);
 extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
 
+/* For use by IVB LP watermark workaround in intel_sprite.c */
+extern void sandybridge_update_wm(struct drm_device *dev);
+extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+                                          uint32_t sprite_width,
+                                          int pixel_size);
+
+extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv);
+extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv);
+
 #endif /* __INTEL_DRV_H__ */
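
The _wait_for() change above replaces the open-coded atomic-context test with the new drm_can_sleep() helper. A minimal sketch of what that helper presumably amounts to, based on the expression it replaces, plus a hypothetical use of the macro:

    /* Presumed equivalent of drm_can_sleep(); the real definition lives in
     * the DRM core headers, not in this file.
     */
    static inline bool can_sleep_sketch(void)
    {
            return !(in_atomic() || in_dbg_master());
    }

    /* Hypothetical caller: poll a register for up to 50 ms, sleeping 1 ms
     * between reads whenever sleeping is allowed, otherwise busy-polling:
     *
     *      if (wait_for((I915_READ(PCH_DPLL(pipe)) & DPLL_VCO_ENABLE) == 0, 50))
     *              DRM_ERROR("PLL failed to turn off\n");
     */
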
index ec49bae..571375a 100644 (file)
@@ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct fb_info *info;
        struct drm_framebuffer *fb;
-       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd;
        struct drm_i915_gem_object *obj;
        struct device *device = &dev->pdev->dev;
        int size, ret;
@@ -77,11 +77,12 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
 
-       mode_cmd.bpp = sizes->surface_bpp;
-       mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
-       mode_cmd.depth = sizes->surface_depth;
+       mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
+                                                     8), 64);
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                         sizes->surface_depth);
 
-       size = mode_cmd.pitch * mode_cmd.height;
+       size = mode_cmd.pitches[0] * mode_cmd.height;
        size = ALIGN(size, PAGE_SIZE);
        obj = i915_gem_alloc_object(dev, size);
        if (!obj) {
@@ -148,7 +149,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 //     memset(info->screen_base, 0, size);
 
-       drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
        info->pixmap.size = 64*1024;
@@ -269,8 +270,14 @@ void intel_fb_restore_mode(struct drm_device *dev)
 {
        int ret;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_plane *plane;
 
        ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
        if (ret)
                DRM_DEBUG("failed to restore crtc mode\n");
+
+       /* Be sure to shut off any planes that may be active */
+       list_for_each_entry(plane, &config->plane_list, head)
+               plane->funcs->disable_plane(plane);
 }
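
For reference, the drm_mode_fb_cmd2 fields the fbdev path above now fills in, with hypothetical numbers for a 1920x1080, 32 bpp / depth 24 surface:

    /* mode_cmd.width        = 1920;
     * mode_cmd.height       = 1080;
     * mode_cmd.pitches[0]   = ALIGN(1920 * ((32 + 7) / 8), 64);   == 7680
     * mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24);
     *                         (expected to resolve to DRM_FORMAT_XRGB8888)
     */
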
index d4f5a0b..64541f7 100644 (file)
@@ -269,6 +269,10 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        u32 temp;
+       u32 enable_bits = SDVO_ENABLE;
+
+       if (intel_hdmi->has_audio)
+               enable_bits |= SDVO_AUDIO_ENABLE;
 
        temp = I915_READ(intel_hdmi->sdvox_reg);
 
@@ -281,9 +285,9 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
        }
 
        if (mode != DRM_MODE_DPMS_ON) {
-               temp &= ~SDVO_ENABLE;
+               temp &= ~enable_bits;
        } else {
-               temp |= SDVO_ENABLE;
+               temp |= enable_bits;
        }
 
        I915_WRITE(intel_hdmi->sdvox_reg, temp);
index ca70e2f..77e729d 100644 (file)
@@ -414,6 +414,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
                        return ret;
        }
 
+       if (INTEL_INFO(dev)->gen >= 6) {
+               I915_WRITE(INSTPM,
+                          INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
+       }
+
        return ret;
 }
 
@@ -787,6 +792,17 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
+gen7_blt_ring_get_irq(struct intel_ring_buffer *ring)
+{
+       /* The BLT ring on IVB appears to have broken synchronization
+        * between the seqno write and the interrupt, so that the
+        * interrupt appears first.  Returning false here makes
+        * i915_wait_request() do a polling loop, instead.
+        */
+       return false;
+}
+
+static bool
 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
        struct drm_device *dev = ring->dev;
@@ -1119,7 +1135,16 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
        }
 
        trace_i915_ring_wait_begin(ring);
-       end = jiffies + 3 * HZ;
+       if (drm_core_check_feature(dev, DRIVER_GEM))
+               /* With GEM the hangcheck timer should kick us out of the loop;
+                * leaving it early runs the risk of corrupting GEM state (due
+                * to running on almost untested codepaths). But on resume
+                * timers don't work yet, so prevent a complete hang in that
+                * case by choosing an insanely large timeout. */
+               end = jiffies + 60 * HZ;
+       else
+               end = jiffies + 3 * HZ;
+
        do {
                ring->head = I915_READ_HEAD(ring);
                ring->space = ring_space(ring);
@@ -1552,5 +1577,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 
        *ring = gen6_blt_ring;
 
+       if (IS_GEN7(dev))
+               ring->irq_get = gen7_blt_ring_get_irq;
+
        return intel_init_ring_buffer(dev, ring);
 }
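
The gen7_blt_ring_get_irq() comment above explains why the IVB BLT ring falls back to a polling wait, and the intel_wait_ring_buffer() hunk shows the generous resume-safe timeout it now uses. A sketch of the shape of that wait (seqno_passed() is a stand-in name, not the driver's real helper):

    /* Sketch only, not the driver's actual wait loop. */
    unsigned long end = jiffies + 60 * HZ;          /* generous, resume-safe */

    do {
            if (seqno_passed(ring, seqno))          /* hypothetical helper */
                    break;
            msleep(1);
    } while (!time_after(jiffies, end));
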
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
new file mode 100644 (file)
index 0000000..d13989f
--- /dev/null
@@ -0,0 +1,668 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *   Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane-related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fourcc.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static void
+ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+                struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+                unsigned int crtc_w, unsigned int crtc_h,
+                uint32_t x, uint32_t y,
+                uint32_t src_w, uint32_t src_h)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       int pipe = intel_plane->pipe;
+       u32 sprctl, sprscale = 0;
+       int pixel_size;
+
+       sprctl = I915_READ(SPRCTL(pipe));
+
+       /* Mask out pixel format bits in case we change it */
+       sprctl &= ~SPRITE_PIXFORMAT_MASK;
+       sprctl &= ~SPRITE_RGB_ORDER_RGBX;
+       sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_XBGR8888:
+               sprctl |= SPRITE_FORMAT_RGBX888;
+               pixel_size = 4;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+               pixel_size = 4;
+               break;
+       case DRM_FORMAT_YUYV:
+               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+               pixel_size = 2;
+               break;
+       case DRM_FORMAT_YVYU:
+               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+               pixel_size = 2;
+               break;
+       case DRM_FORMAT_UYVY:
+               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+               pixel_size = 2;
+               break;
+       case DRM_FORMAT_VYUY:
+               sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+               pixel_size = 2;
+               break;
+       default:
+               DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+               sprctl |= SPRITE_FORMAT_RGBX888;
+               pixel_size = 4;
+               break;
+       }
+
+       if (obj->tiling_mode != I915_TILING_NONE)
+               sprctl |= SPRITE_TILED;
+
+       /* must disable */
+       sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+       sprctl |= SPRITE_ENABLE;
+       sprctl |= SPRITE_DEST_KEY;
+
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+       crtc_w--;
+       crtc_h--;
+
+       intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+       /*
+        * IVB workaround: must disable low power watermarks for at least
+        * one frame before enabling scaling.  LP watermarks can be re-enabled
+        * when scaling is disabled.
+        */
+       if (crtc_w != src_w || crtc_h != src_h) {
+               dev_priv->sprite_scaling_enabled = true;
+               sandybridge_update_wm(dev);
+               intel_wait_for_vblank(dev, pipe);
+               sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
+       } else {
+               dev_priv->sprite_scaling_enabled = false;
+               /* potentially re-enable LP watermarks */
+               sandybridge_update_wm(dev);
+       }
+
+       I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+       I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+       if (obj->tiling_mode != I915_TILING_NONE) {
+               I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+       } else {
+               unsigned long offset;
+
+               offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+               I915_WRITE(SPRLINOFF(pipe), offset);
+       }
+       I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+       I915_WRITE(SPRSCALE(pipe), sprscale);
+       I915_WRITE(SPRCTL(pipe), sprctl);
+       I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
+       POSTING_READ(SPRSURF(pipe));
+}
+
+static void
+ivb_disable_plane(struct drm_plane *plane)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       int pipe = intel_plane->pipe;
+
+       I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
+       /* Can't leave the scaler enabled... */
+       I915_WRITE(SPRSCALE(pipe), 0);
+       /* Activate double buffered register update */
+       I915_WRITE(SPRSURF(pipe), 0);
+       POSTING_READ(SPRSURF(pipe));
+}
+
+static int
+ivb_update_colorkey(struct drm_plane *plane,
+                   struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane;
+       u32 sprctl;
+       int ret = 0;
+
+       intel_plane = to_intel_plane(plane);
+
+       I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
+       I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
+       I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
+
+       sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+       sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
+       if (key->flags & I915_SET_COLORKEY_DESTINATION)
+               sprctl |= SPRITE_DEST_KEY;
+       else if (key->flags & I915_SET_COLORKEY_SOURCE)
+               sprctl |= SPRITE_SOURCE_KEY;
+       I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
+
+       POSTING_READ(SPRKEYMSK(intel_plane->pipe));
+
+       return ret;
+}
+
+static void
+ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane;
+       u32 sprctl;
+
+       intel_plane = to_intel_plane(plane);
+
+       key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
+       key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
+       key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
+       key->flags = 0;
+
+       sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+
+       if (sprctl & SPRITE_DEST_KEY)
+               key->flags = I915_SET_COLORKEY_DESTINATION;
+       else if (sprctl & SPRITE_SOURCE_KEY)
+               key->flags = I915_SET_COLORKEY_SOURCE;
+       else
+               key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
+snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+                struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+                unsigned int crtc_w, unsigned int crtc_h,
+                uint32_t x, uint32_t y,
+                uint32_t src_w, uint32_t src_h)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       int pipe = intel_plane->pipe, pixel_size;
+       u32 dvscntr, dvsscale = 0;
+
+       dvscntr = I915_READ(DVSCNTR(pipe));
+
+       /* Mask out pixel format bits in case we change it */
+       dvscntr &= ~DVS_PIXFORMAT_MASK;
+       dvscntr &= ~DVS_RGB_ORDER_RGBX;
+       dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_XBGR8888:
+               dvscntr |= DVS_FORMAT_RGBX888;
+               pixel_size = 4;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_RGBX;
+               pixel_size = 4;
+               break;
+       case DRM_FORMAT_YUYV:
+               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+               pixel_size = 2;
+               break;
+       case DRM_FORMAT_YVYU:
+               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+               pixel_size = 2;
+               break;
+       case DRM_FORMAT_UYVY:
+               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+               pixel_size = 2;
+               break;
+       case DRM_FORMAT_VYUY:
+               dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+               pixel_size = 2;
+               break;
+       default:
+               DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+               dvscntr |= DVS_FORMAT_RGBX888;
+               pixel_size = 4;
+               break;
+       }
+
+       if (obj->tiling_mode != I915_TILING_NONE)
+               dvscntr |= DVS_TILED;
+
+       /* must disable */
+       dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+       dvscntr |= DVS_ENABLE;
+
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+       crtc_w--;
+       crtc_h--;
+
+       intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+       if (crtc_w != src_w || crtc_h != src_h)
+               dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
+
+       I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+       I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+       if (obj->tiling_mode != I915_TILING_NONE) {
+               I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+       } else {
+               unsigned long offset;
+
+               offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+               I915_WRITE(DVSLINOFF(pipe), offset);
+       }
+       I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+       I915_WRITE(DVSSCALE(pipe), dvsscale);
+       I915_WRITE(DVSCNTR(pipe), dvscntr);
+       I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
+       POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+snb_disable_plane(struct drm_plane *plane)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       int pipe = intel_plane->pipe;
+
+       I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+       /* Disable the scaler */
+       I915_WRITE(DVSSCALE(pipe), 0);
+       /* Flush double buffered register updates */
+       I915_WRITE(DVSSURF(pipe), 0);
+       POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+intel_enable_primary(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int reg = DSPCNTR(intel_crtc->plane);
+
+       I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+}
+
+static void
+intel_disable_primary(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int reg = DSPCNTR(intel_crtc->plane);
+
+       I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+}
+
+static int
+snb_update_colorkey(struct drm_plane *plane,
+                   struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane;
+       u32 dvscntr;
+       int ret = 0;
+
+       intel_plane = to_intel_plane(plane);
+
+       I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
+       I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
+       I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
+
+       dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+       dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
+       if (key->flags & I915_SET_COLORKEY_DESTINATION)
+               dvscntr |= DVS_DEST_KEY;
+       else if (key->flags & I915_SET_COLORKEY_SOURCE)
+               dvscntr |= DVS_SOURCE_KEY;
+       I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
+
+       POSTING_READ(DVSKEYMSK(intel_plane->pipe));
+
+       return ret;
+}
+
+static void
+snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane;
+       u32 dvscntr;
+
+       intel_plane = to_intel_plane(plane);
+
+       key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
+       key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
+       key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
+       key->flags = 0;
+
+       dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+
+       if (dvscntr & DVS_DEST_KEY)
+               key->flags = I915_SET_COLORKEY_DESTINATION;
+       else if (dvscntr & DVS_SOURCE_KEY)
+               key->flags = I915_SET_COLORKEY_SOURCE;
+       else
+               key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+                  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                  unsigned int crtc_w, unsigned int crtc_h,
+                  uint32_t src_x, uint32_t src_y,
+                  uint32_t src_w, uint32_t src_h)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       struct intel_framebuffer *intel_fb;
+       struct drm_i915_gem_object *obj, *old_obj;
+       int pipe = intel_plane->pipe;
+       int ret = 0;
+       int x = src_x >> 16, y = src_y >> 16;
+       int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
+       bool disable_primary = false;
+
+       intel_fb = to_intel_framebuffer(fb);
+       obj = intel_fb->obj;
+
+       old_obj = intel_plane->obj;
+
+       /* Pipe must be running... */
+       if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
+               return -EINVAL;
+
+       if (crtc_x >= primary_w || crtc_y >= primary_h)
+               return -EINVAL;
+
+       /* Don't modify another pipe's plane */
+       if (intel_plane->pipe != intel_crtc->pipe)
+               return -EINVAL;
+
+       /*
+        * Clamp the width & height into the visible area.  Note we don't
+        * try to scale the source if part of the visible region is offscreen.
+        * The caller must handle that by adjusting source offset and size.
+        */
+       if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
+               crtc_w += crtc_x;
+               crtc_x = 0;
+       }
+       if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
+               goto out;
+       if ((crtc_x + crtc_w) > primary_w)
+               crtc_w = primary_w - crtc_x;
+
+       if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
+               crtc_h += crtc_y;
+               crtc_y = 0;
+       }
+       if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
+               goto out;
+       if (crtc_y + crtc_h > primary_h)
+               crtc_h = primary_h - crtc_y;
+
+       if (!crtc_w || !crtc_h) /* Again, nothing to display */
+               goto out;
+
+       /*
+        * We can take a larger source and scale it down, but
+        * only so much...  16x is the max on SNB.
+        */
+       if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
+               return -EINVAL;
+
+       /*
+        * If the sprite is completely covering the primary plane,
+        * we can disable the primary and save power.
+        */
+       if ((crtc_x == 0) && (crtc_y == 0) &&
+           (crtc_w == primary_w) && (crtc_h == primary_h))
+               disable_primary = true;
+
+       mutex_lock(&dev->struct_mutex);
+
+       ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+       if (ret) {
+               DRM_ERROR("failed to pin object\n");
+               goto out_unlock;
+       }
+
+       intel_plane->obj = obj;
+
+       /*
+        * Be sure to re-enable the primary plane before the sprite stops
+        * fully covering it.
+        */
+       if (!disable_primary && intel_plane->primary_disabled) {
+               intel_enable_primary(crtc);
+               intel_plane->primary_disabled = false;
+       }
+
+       intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
+                                 crtc_w, crtc_h, x, y, src_w, src_h);
+
+       if (disable_primary) {
+               intel_disable_primary(crtc);
+               intel_plane->primary_disabled = true;
+       }
+
+       /* Unpin old obj after new one is active to avoid ugliness */
+       if (old_obj) {
+               /*
+                * It's fairly common to simply update the position of
+                * an existing object.  In that case, we don't need to
+                * wait for vblank to avoid ugliness, we only need to
+                * do the pin & ref bookkeeping.
+                */
+               if (old_obj != obj) {
+                       mutex_unlock(&dev->struct_mutex);
+                       intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+                       mutex_lock(&dev->struct_mutex);
+               }
+               i915_gem_object_unpin(old_obj);
+       }
+
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+out:
+       return ret;
+}
+
+static int
+intel_disable_plane(struct drm_plane *plane)
+{
+       struct drm_device *dev = plane->dev;
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       int ret = 0;
+
+       if (intel_plane->primary_disabled) {
+               intel_enable_primary(plane->crtc);
+               intel_plane->primary_disabled = false;
+       }
+
+       intel_plane->disable_plane(plane);
+
+       if (!intel_plane->obj)
+               goto out;
+
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_object_unpin(intel_plane->obj);
+       intel_plane->obj = NULL;
+       mutex_unlock(&dev->struct_mutex);
+out:
+
+       return ret;
+}
+
+static void intel_destroy_plane(struct drm_plane *plane)
+{
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       intel_disable_plane(plane);
+       drm_plane_cleanup(plane);
+       kfree(intel_plane);
+}
+
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+{
+       struct drm_intel_sprite_colorkey *set = data;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_mode_object *obj;
+       struct drm_plane *plane;
+       struct intel_plane *intel_plane;
+       int ret = 0;
+
+       if (!dev_priv)
+               return -EINVAL;
+
+       /* Make sure we don't try to enable both src & dest simultaneously */
+       if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       plane = obj_to_plane(obj);
+       intel_plane = to_intel_plane(plane);
+       ret = intel_plane->update_colorkey(plane, set);
+
+out_unlock:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+{
+       struct drm_intel_sprite_colorkey *get = data;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_mode_object *obj;
+       struct drm_plane *plane;
+       struct intel_plane *intel_plane;
+       int ret = 0;
+
+       if (!dev_priv)
+               return -EINVAL;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       plane = obj_to_plane(obj);
+       intel_plane = to_intel_plane(plane);
+       intel_plane->get_colorkey(plane, get);
+
+out_unlock:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+static const struct drm_plane_funcs intel_plane_funcs = {
+       .update_plane = intel_update_plane,
+       .disable_plane = intel_disable_plane,
+       .destroy = intel_destroy_plane,
+};
+
+static uint32_t snb_plane_formats[] = {
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+};
+
+int
+intel_plane_init(struct drm_device *dev, enum pipe pipe)
+{
+       struct intel_plane *intel_plane;
+       unsigned long possible_crtcs;
+       int ret;
+
+       if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+               DRM_ERROR("new plane code only for SNB+\n");
+               return -ENODEV;
+       }
+
+       intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
+       if (!intel_plane)
+               return -ENOMEM;
+
+       if (IS_GEN6(dev)) {
+               intel_plane->max_downscale = 16;
+               intel_plane->update_plane = snb_update_plane;
+               intel_plane->disable_plane = snb_disable_plane;
+               intel_plane->update_colorkey = snb_update_colorkey;
+               intel_plane->get_colorkey = snb_get_colorkey;
+       } else if (IS_GEN7(dev)) {
+               intel_plane->max_downscale = 2;
+               intel_plane->update_plane = ivb_update_plane;
+               intel_plane->disable_plane = ivb_disable_plane;
+               intel_plane->update_colorkey = ivb_update_colorkey;
+               intel_plane->get_colorkey = ivb_get_colorkey;
+       }
+
+       intel_plane->pipe = pipe;
+       possible_crtcs = (1 << pipe);
+       ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
+                            &intel_plane_funcs, snb_plane_formats,
+                            ARRAY_SIZE(snb_plane_formats), false);
+       if (ret)
+               kfree(intel_plane);
+
+       return ret;
+}
+
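Editorial aside, not part of the patch: the heart of intel_update_plane() above is the destination-rectangle clamping followed by the area-based downscale limit (max_downscale is 16 on SNB and 2 on IVB, per intel_plane_init() above). Below is a minimal standalone C sketch of just that arithmetic, with made-up numbers and no kernel APIs.

/* Illustrative only: mirrors the shape of the clamp + downscale check in
 * intel_update_plane() above.  All numbers are invented; no kernel APIs. */
#include <stdio.h>

int main(void)
{
        int crtc_x = -100, crtc_y = 50;          /* requested position, partly off-screen */
        int crtc_w = 800, crtc_h = 600;          /* requested destination size */
        int primary_w = 1024, primary_h = 768;   /* active mode, as in crtc->mode */
        unsigned int src_w = 1920, src_h = 1080; /* source surface size */
        unsigned int max_downscale = 16;         /* SNB limit from intel_plane_init() */

        /* Clamp the destination rectangle into the visible area. */
        if (crtc_x < 0 && crtc_x + crtc_w > 0) {
                crtc_w += crtc_x;
                crtc_x = 0;
        }
        if (crtc_x + crtc_w > primary_w)
                crtc_w = primary_w - crtc_x;
        if (crtc_y + crtc_h > primary_h)
                crtc_h = primary_h - crtc_y;

        /* Reject anything needing more than the supported area downscale. */
        if (src_w * src_h / (unsigned int)(crtc_w * crtc_h) > max_downscale)
                printf("rejected: downscale ratio too large\n");
        else
                printf("ok: %ux%u -> %dx%d at %d,%d\n",
                       src_w, src_h, crtc_w, crtc_h, crtc_x, crtc_y);
        return 0;
}

With these numbers the 1920x1080 source lands in a 700x600 clamped destination, an area ratio of 4 after integer division, which is well within the 16x SNB limit.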
index 33daa29..f9a925d 100644
@@ -44,6 +44,20 @@ static struct pci_device_id pciidlist[] = {
        mga_PCI_IDS
 };
 
+static const struct file_operations mga_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = mga_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
@@ -64,20 +78,7 @@ static struct drm_driver driver = {
        .reclaim_buffers = drm_core_reclaim_buffers,
        .ioctls = mga_ioctls,
        .dma_ioctl = mga_dma_buffers,
-       .fops = {
-               .owner = THIS_MODULE,
-               .open = drm_open,
-               .release = drm_release,
-               .unlocked_ioctl = drm_ioctl,
-               .mmap = drm_mmap,
-               .poll = drm_poll,
-               .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
-               .compat_ioctl = mga_compat_ioctl,
-#endif
-               .llseek = noop_llseek,
-       },
-
+       .fops = &mga_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index 35ef5b1..9f27e3d 100644
@@ -9,9 +9,9 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
-             nouveau_dp.o nouveau_ramht.o \
+             nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
             nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
-            nouveau_mm.o nouveau_vm.o \
+            nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -19,9 +19,12 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o nvc0_graph.o \
              nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
-             nv84_crypt.o \
+             nv84_crypt.o nv98_crypt.o \
              nva3_copy.o nvc0_copy.o \
              nv31_mpeg.o nv50_mpeg.o \
+             nv84_bsp.o \
+             nv84_vp.o \
+             nv98_ppp.o \
              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_crtc.o nv04_display.o nv04_cursor.o \
index 5fc201b..e5cbead 100644
@@ -27,6 +27,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_hw.h"
 #include "nouveau_encoder.h"
+#include "nouveau_gpio.h"
 
 #include <linux/io-mapping.h>
 
@@ -34,9 +35,6 @@
 #define NV_CIO_CRE_44_HEADA 0x0
 #define NV_CIO_CRE_44_HEADB 0x3
 #define FEATURE_MOBILE 0x10    /* also FEATURE_QUADRO for BMP */
-#define LEGACY_I2C_CRT 0x80
-#define LEGACY_I2C_PANEL 0x81
-#define LEGACY_I2C_TV 0x82
 
 #define EDID1_LEN 128
 
@@ -723,115 +721,19 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
        return dcb_entry;
 }
 
-static int
-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
-{
-       uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
-       int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
-       int recordoffset = 0, rdofs = 1, wrofs = 0;
-       uint8_t port_type = 0;
-
-       if (!i2ctable)
-               return -EINVAL;
-
-       if (dcb_version >= 0x30) {
-               if (i2ctable[0] != dcb_version) /* necessary? */
-                       NV_WARN(dev,
-                               "DCB I2C table version mismatch (%02X vs %02X)\n",
-                               i2ctable[0], dcb_version);
-               dcb_i2c_ver = i2ctable[0];
-               headerlen = i2ctable[1];
-               if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
-                       i2c_entries = i2ctable[2];
-               else
-                       NV_WARN(dev,
-                               "DCB I2C table has more entries than indexable "
-                               "(%d entries, max %d)\n", i2ctable[2],
-                               DCB_MAX_NUM_I2C_ENTRIES);
-               entry_len = i2ctable[3];
-               /* [4] is i2c_default_indices, read in parse_dcb_table() */
-       }
-       /*
-        * It's your own fault if you call this function on a DCB 1.1 BIOS --
-        * the test below is for DCB 1.2
-        */
-       if (dcb_version < 0x14) {
-               recordoffset = 2;
-               rdofs = 0;
-               wrofs = 1;
-       }
-
-       if (index == 0xf)
-               return 0;
-       if (index >= i2c_entries) {
-               NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
-                        index, i2ctable[2]);
-               return -ENOENT;
-       }
-       if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
-               NV_ERROR(dev, "DCB I2C entry invalid\n");
-               return -EINVAL;
-       }
-
-       if (dcb_i2c_ver >= 0x30) {
-               port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
-
-               /*
-                * Fixup for chips using same address offset for read and
-                * write.
-                */
-               if (port_type == 4)     /* seen on C51 */
-                       rdofs = wrofs = 1;
-               if (port_type >= 5)     /* G80+ */
-                       rdofs = wrofs = 0;
-       }
-
-       if (dcb_i2c_ver >= 0x40) {
-               if (port_type != 5 && port_type != 6)
-                       NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
-
-               i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
-       }
-
-       i2c->port_type = port_type;
-       i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
-       i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
-
-       return 0;
-}
-
 static struct nouveau_i2c_chan *
 init_i2c_device_find(struct drm_device *dev, int i2c_index)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct dcb_table *dcb = &dev_priv->vbios.dcb;
-
        if (i2c_index == 0xff) {
+               struct drm_nouveau_private *dev_priv = dev->dev_private;
+               struct dcb_table *dcb = &dev_priv->vbios.dcb;
                /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
-               int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
-               int default_indices = dcb->i2c_default_indices;
+               int idx = dcb_entry_idx_from_crtchead(dev);
 
+               i2c_index = NV_I2C_DEFAULT(0);
                if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
-                       shift = 4;
-
-               i2c_index = (default_indices >> shift) & 0xf;
+                       i2c_index = NV_I2C_DEFAULT(1);
        }
-       if (i2c_index == 0x80)  /* g80+ */
-               i2c_index = dcb->i2c_default_indices & 0xf;
-       else
-       if (i2c_index == 0x81)
-               i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
-
-       if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) {
-               NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
-               return NULL;
-       }
-
-       /* Make sure i2c table entry has been parsed, it may not
-        * have been if this is a bus not referenced by a DCB encoder
-        */
-       read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
-                          i2c_index, &dcb->i2c[i2c_index]);
 
        return nouveau_i2c_find(dev, i2c_index);
 }
@@ -1199,13 +1101,9 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
        switch (cond) {
        case 0:
-       {
-               struct dcb_connector_table_entry *ent =
-                       &bios->dcb.connector.entry[dcb->connector];
-
-               if (ent->type != DCB_CONNECTOR_eDP)
+               entry = dcb_conn(dev, dcb->connector);
+               if (!entry || entry[0] != DCB_CONNECTOR_eDP)
                        iexec->execute = false;
-       }
                break;
        case 1:
        case 2:
@@ -3227,49 +3125,6 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
        return 1;
 }
 
-static void
-init_gpio_unknv50(struct nvbios *bios, struct dcb_gpio_entry *gpio)
-{
-       const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
-       u32 r, s, v;
-
-       /* Not a clue, needs de-magicing */
-       r = nv50_gpio_ctl[gpio->line >> 4];
-       s = (gpio->line & 0x0f);
-       v = bios_rd32(bios, r) & ~(0x00010001 << s);
-       switch ((gpio->entry & 0x06000000) >> 25) {
-       case 1:
-               v |= (0x00000001 << s);
-               break;
-       case 2:
-               v |= (0x00010000 << s);
-               break;
-       default:
-               break;
-       }
-
-       bios_wr32(bios, r, v);
-}
-
-static void
-init_gpio_unknvd0(struct nvbios *bios, struct dcb_gpio_entry *gpio)
-{
-       u32 v, i;
-
-       v  = bios_rd32(bios, 0x00d610 + (gpio->line * 4));
-       v &= 0xffffff00;
-       v |= (gpio->entry & 0x00ff0000) >> 16;
-       bios_wr32(bios, 0x00d610 + (gpio->line * 4), v);
-
-       i = (gpio->entry & 0x1f000000) >> 24;
-       if (i) {
-               v  = bios_rd32(bios, 0x00d640 + ((i - 1) * 4));
-               v &= 0xffffff00;
-               v |= gpio->line;
-               bios_wr32(bios, 0x00d640 + ((i - 1) * 4), v);
-       }
-}
-
 static int
 init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 {
@@ -3282,35 +3137,8 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
         * each GPIO according to various values listed in each entry
         */
 
-       struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       int i;
-
-       if (dev_priv->card_type < NV_50) {
-               NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
-               return 1;
-       }
-
-       if (!iexec->execute)
-               return 1;
-
-       for (i = 0; i < bios->dcb.gpio.entries; i++) {
-               struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
-
-               BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
-
-               BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
-                       offset, gpio->tag, gpio->state_default);
-
-               if (!bios->execute)
-                       continue;
-
-               pgpio->set(bios->dev, gpio->tag, gpio->state_default);
-               if (dev_priv->card_type < NV_D0)
-                       init_gpio_unknv50(bios, gpio);
-               else
-                       init_gpio_unknvd0(bios, gpio);
-       }
+       if (iexec->execute && bios->execute)
+               nouveau_gpio_reset(bios->dev);
 
        return 1;
 }
@@ -4407,18 +4235,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
                break;
        }
 
-       /* Dell Latitude D620 reports a too-high value for the dual-link
-        * transition freq, causing us to program the panel incorrectly.
-        *
-        * It doesn't appear the VBIOS actually uses its transition freq
-        * (90000kHz), instead it uses the "Number of LVDS channels" field
-        * out of the panel ID structure (http://www.spwg.org/).
-        *
-        * For the moment, a quirk will do :)
-        */
-       if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
-               bios->fp.duallink_transition_clk = 80000;
-
        /* set dual_link flag for EDID case */
        if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
                bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
@@ -4541,7 +4357,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
        NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
                        dcbent->type, dcbent->location, dcbent->or);
        for (i = 0; i < table[3]; i++) {
-               otable = ROMPTR(bios, table[table[1] + (i * table[2])]);
+               otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
                if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
                        break;
        }
@@ -4719,7 +4535,7 @@ static struct pll_mapping nv84_pll_mapping[] = {
        { PLL_CORE  , 0x004028 },
        { PLL_SHADER, 0x004020 },
        { PLL_MEMORY, 0x004008 },
-       { PLL_UNK05 , 0x004030 },
+       { PLL_VDEC  , 0x004030 },
        { PLL_UNK41 , 0x00e818 },
        { PLL_VPLL0 , 0x614100 },
        { PLL_VPLL1 , 0x614900 },
@@ -5485,6 +5301,9 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
        struct nvbios *bios = &dev_priv->vbios;
        u8 entries, *entry;
 
+       if (bios->type != NVBIOS_BIT)
+               return -ENODEV;
+
        entries = bios->data[bios->offset + 10];
        entry   = &bios->data[bios->offset + 12];
        while (entries--) {
@@ -5493,7 +5312,7 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
                        bit->version = entry[1];
                        bit->length = ROM16(entry[2]);
                        bit->offset = ROM16(entry[4]);
-                       bit->data = ROMPTR(bios, entry[4]);
+                       bit->data = ROMPTR(dev, entry[4]);
                        return 0;
                }
 
@@ -5598,10 +5417,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
        uint16_t legacy_scripts_offset, legacy_i2c_offset;
 
        /* load needed defaults in case we can't parse this info */
-       bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
-       bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
-       bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
-       bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
        bios->digital_min_front_porch = 0x4b;
        bios->fmaxvco = 256000;
        bios->fminvco = 128000;
@@ -5709,14 +5524,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
        bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
        bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
        bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
-       if (bios->data[legacy_i2c_offset + 4])
-               bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
-       if (bios->data[legacy_i2c_offset + 5])
-               bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
-       if (bios->data[legacy_i2c_offset + 6])
-               bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
-       if (bios->data[legacy_i2c_offset + 7])
-               bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
 
        if (bmplength > 74) {
                bios->fmaxvco = ROM32(bmp[67]);
@@ -5767,286 +5574,128 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
        return 0;
 }
 
-static struct dcb_gpio_entry *
-new_gpio_entry(struct nvbios *bios)
-{
-       struct drm_device *dev = bios->dev;
-       struct dcb_gpio_table *gpio = &bios->dcb.gpio;
-
-       if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
-               NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
-               return NULL;
-       }
-
-       return &gpio->entry[gpio->entries++];
-}
-
-struct dcb_gpio_entry *
-nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
+void *
+dcb_table(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nvbios *bios = &dev_priv->vbios;
-       int i;
-
-       for (i = 0; i < bios->dcb.gpio.entries; i++) {
-               if (bios->dcb.gpio.entry[i].tag != tag)
-                       continue;
+       u8 *dcb = NULL;
 
-               return &bios->dcb.gpio.entry[i];
+       if (dev_priv->card_type > NV_04)
+               dcb = ROMPTR(dev, dev_priv->vbios.data[0x36]);
+       if (!dcb) {
+               NV_WARNONCE(dev, "No DCB data found in VBIOS\n");
+               return NULL;
        }
 
-       return NULL;
-}
-
-static void
-parse_dcb_gpio_table(struct nvbios *bios)
-{
-       struct drm_device *dev = bios->dev;
-       struct dcb_gpio_entry *e;
-       u8 headerlen, entries, recordlen;
-       u8 *dcb, *gpio = NULL, *entry;
-       int i;
-
-       dcb = ROMPTR(bios, bios->data[0x36]);
+       if (dcb[0] >= 0x41) {
+               NV_WARNONCE(dev, "DCB version 0x%02x unknown\n", dcb[0]);
+               return NULL;
+       } else
        if (dcb[0] >= 0x30) {
-               gpio = ROMPTR(bios, dcb[10]);
-               if (!gpio)
-                       goto no_table;
-
-               headerlen = gpio[1];
-               entries   = gpio[2];
-               recordlen = gpio[3];
+               if (ROM32(dcb[6]) == 0x4edcbdcb)
+                       return dcb;
        } else
-       if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
-               gpio = ROMPTR(bios, dcb[-15]);
-               if (!gpio)
-                       goto no_table;
-
-               headerlen = 3;
-               entries   = gpio[2];
-               recordlen = gpio[1];
+       if (dcb[0] >= 0x20) {
+               if (ROM32(dcb[4]) == 0x4edcbdcb)
+                       return dcb;
        } else
-       if (dcb[0] >= 0x22) {
-               /* No GPIO table present, parse the TVDAC GPIO data. */
-               uint8_t *tvdac_gpio = &dcb[-5];
-
-               if (tvdac_gpio[0] & 1) {
-                       e = new_gpio_entry(bios);
-                       e->tag = DCB_GPIO_TVDAC0;
-                       e->line = tvdac_gpio[1] >> 4;
-                       e->invert = tvdac_gpio[0] & 2;
-               }
-
-               goto no_table;
+       if (dcb[0] >= 0x15) {
+               if (!memcmp(&dcb[-7], "DEV_REC", 7))
+                       return dcb;
        } else {
-               NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
-               goto no_table;
-       }
-
-       entry = gpio + headerlen;
-       for (i = 0; i < entries; i++, entry += recordlen) {
-               e = new_gpio_entry(bios);
-               if (!e)
-                       break;
-
-               if (gpio[0] < 0x40) {
-                       e->entry = ROM16(entry[0]);
-                       e->tag = (e->entry & 0x07e0) >> 5;
-                       if (e->tag == 0x3f) {
-                               bios->dcb.gpio.entries--;
-                               continue;
-                       }
-
-                       e->line = (e->entry & 0x001f);
-                       e->invert = ((e->entry & 0xf800) >> 11) != 4;
-               } else {
-                       e->entry = ROM32(entry[0]);
-                       e->tag = (e->entry & 0x0000ff00) >> 8;
-                       if (e->tag == 0xff) {
-                               bios->dcb.gpio.entries--;
-                               continue;
-                       }
-
-                       e->line = (e->entry & 0x0000001f) >> 0;
-                       if (gpio[0] == 0x40) {
-                               e->state_default = (e->entry & 0x01000000) >> 24;
-                               e->state[0] = (e->entry & 0x18000000) >> 27;
-                               e->state[1] = (e->entry & 0x60000000) >> 29;
-                       } else {
-                               e->state_default = (e->entry & 0x00000080) >> 7;
-                               e->state[0] = (entry[4] >> 4) & 3;
-                               e->state[1] = (entry[4] >> 6) & 3;
-                       }
-               }
-       }
-
-no_table:
-       /* Apple iMac G4 NV18 */
-       if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
-               e = new_gpio_entry(bios);
-               if (e) {
-                       e->tag = DCB_GPIO_TVDAC0;
-                       e->line = 4;
-               }
-       }
-}
-
-struct dcb_connector_table_entry *
-nouveau_bios_connector_entry(struct drm_device *dev, int index)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nvbios *bios = &dev_priv->vbios;
-       struct dcb_connector_table_entry *cte;
-
-       if (index >= bios->dcb.connector.entries)
-               return NULL;
-
-       cte = &bios->dcb.connector.entry[index];
-       if (cte->type == 0xff)
+               /*
+                * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
+                * always has the same single (crt) entry, even when tv-out
+                * present, so the conclusion is this version cannot really
+                * be used.
+                *
+                * v1.2 tables (some NV6/10, and NV15+) normally have the
+                * same 5 entries, which are not specific to the card and so
+                * no use.
+                *
+                * v1.2 does have an I2C table that read_dcb_i2c_table can
+                * handle, but cards exist (nv11 in #14821) with a bad i2c
+                * table pointer, so use the indices parsed in
+                * parse_bmp_structure.
+                *
+                * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+                */
+               NV_WARNONCE(dev, "No useful DCB data in VBIOS\n");
                return NULL;
+       }
 
-       return cte;
+       NV_WARNONCE(dev, "DCB header validation failed\n");
+       return NULL;
 }
 
-static enum dcb_connector_type
-divine_connector_type(struct nvbios *bios, int index)
+void *
+dcb_outp(struct drm_device *dev, u8 idx)
 {
-       struct dcb_table *dcb = &bios->dcb;
-       unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
-       int i;
-
-       for (i = 0; i < dcb->entries; i++) {
-               if (dcb->entry[i].connector == index)
-                       encoders |= (1 << dcb->entry[i].type);
-       }
-
-       if (encoders & (1 << OUTPUT_DP)) {
-               if (encoders & (1 << OUTPUT_TMDS))
-                       type = DCB_CONNECTOR_DP;
-               else
-                       type = DCB_CONNECTOR_eDP;
-       } else
-       if (encoders & (1 << OUTPUT_TMDS)) {
-               if (encoders & (1 << OUTPUT_ANALOG))
-                       type = DCB_CONNECTOR_DVI_I;
-               else
-                       type = DCB_CONNECTOR_DVI_D;
-       } else
-       if (encoders & (1 << OUTPUT_ANALOG)) {
-               type = DCB_CONNECTOR_VGA;
+       u8 *dcb = dcb_table(dev);
+       if (dcb && dcb[0] >= 0x30) {
+               if (idx < dcb[2])
+                       return dcb + dcb[1] + (idx * dcb[3]);
        } else
-       if (encoders & (1 << OUTPUT_LVDS)) {
-               type = DCB_CONNECTOR_LVDS;
+       if (dcb && dcb[0] >= 0x20) {
+               u8 *i2c = ROMPTR(dev, dcb[2]);
+               u8 *ent = dcb + 8 + (idx * 8);
+               if (i2c && ent < i2c)
+                       return ent;
        } else
-       if (encoders & (1 << OUTPUT_TV)) {
-               type = DCB_CONNECTOR_TV_0;
+       if (dcb && dcb[0] >= 0x15) {
+               u8 *i2c = ROMPTR(dev, dcb[2]);
+               u8 *ent = dcb + 4 + (idx * 10);
+               if (i2c && ent < i2c)
+                       return ent;
        }
 
-       return type;
+       return NULL;
 }
 
-static void
-apply_dcb_connector_quirks(struct nvbios *bios, int idx)
-{
-       struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
-       struct drm_device *dev = bios->dev;
+int
+dcb_outp_foreach(struct drm_device *dev, void *data,
+                int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
+{
+       int ret, idx = -1;
+       u8 *outp = NULL;
+       while ((outp = dcb_outp(dev, ++idx))) {
+               if (ROM32(outp[0]) == 0x00000000)
+                       break; /* seen on an NV11 with DCB v1.5 */
+               if (ROM32(outp[0]) == 0xffffffff)
+                       break; /* seen on an NV17 with DCB v2.0 */
+
+               if ((outp[0] & 0x0f) == OUTPUT_UNUSED)
+                       continue;
+               if ((outp[0] & 0x0f) == OUTPUT_EOL)
+                       break;
 
-       /* Gigabyte NX85T */
-       if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
-               if (cte->type == DCB_CONNECTOR_HDMI_1)
-                       cte->type = DCB_CONNECTOR_DVI_I;
+               ret = exec(dev, data, idx, outp);
+               if (ret)
+                       return ret;
        }
 
-       /* Gigabyte GV-NX86T512H */
-       if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
-               if (cte->type == DCB_CONNECTOR_HDMI_1)
-                       cte->type = DCB_CONNECTOR_DVI_I;
-       }
+       return 0;
 }
 
-static const u8 hpd_gpio[16] = {
-       0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
-       0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
-};
-
-static void
-parse_dcb_connector_table(struct nvbios *bios)
+u8 *
+dcb_conntab(struct drm_device *dev)
 {
-       struct drm_device *dev = bios->dev;
-       struct dcb_connector_table *ct = &bios->dcb.connector;
-       struct dcb_connector_table_entry *cte;
-       uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
-       uint8_t *entry;
-       int i;
-
-       if (!bios->dcb.connector_table_ptr) {
-               NV_DEBUG_KMS(dev, "No DCB connector table present\n");
-               return;
-       }
-
-       NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
-               conntab[0], conntab[1], conntab[2], conntab[3]);
-       if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
-           (conntab[3] != 2 && conntab[3] != 4)) {
-               NV_ERROR(dev, "  Unknown!  Please report.\n");
-               return;
+       u8 *dcb = dcb_table(dev);
+       if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
+               u8 *conntab = ROMPTR(dev, dcb[0x14]);
+               if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
+                       return conntab;
        }
+       return NULL;
+}
 
-       ct->entries = conntab[2];
-
-       entry = conntab + conntab[1];
-       cte = &ct->entry[0];
-       for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
-               cte->index = i;
-               if (conntab[3] == 2)
-                       cte->entry = ROM16(entry[0]);
-               else
-                       cte->entry = ROM32(entry[0]);
-
-               cte->type  = (cte->entry & 0x000000ff) >> 0;
-               cte->index2 = (cte->entry & 0x00000f00) >> 8;
-
-               cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
-               cte->gpio_tag = hpd_gpio[cte->gpio_tag];
-
-               if (cte->type == 0xff)
-                       continue;
-
-               apply_dcb_connector_quirks(bios, i);
-
-               NV_INFO(dev, "  %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
-                       i, cte->entry, cte->type, cte->index, cte->gpio_tag);
-
-               /* check for known types, fallback to guessing the type
-                * from attached encoders if we hit an unknown.
-                */
-               switch (cte->type) {
-               case DCB_CONNECTOR_VGA:
-               case DCB_CONNECTOR_TV_0:
-               case DCB_CONNECTOR_TV_1:
-               case DCB_CONNECTOR_TV_3:
-               case DCB_CONNECTOR_DVI_I:
-               case DCB_CONNECTOR_DVI_D:
-               case DCB_CONNECTOR_LVDS:
-               case DCB_CONNECTOR_LVDS_SPWG:
-               case DCB_CONNECTOR_DP:
-               case DCB_CONNECTOR_eDP:
-               case DCB_CONNECTOR_HDMI_0:
-               case DCB_CONNECTOR_HDMI_1:
-                       break;
-               default:
-                       cte->type = divine_connector_type(bios, cte->index);
-                       NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
-                       break;
-               }
-
-               if (nouveau_override_conntype) {
-                       int type = divine_connector_type(bios, cte->index);
-                       if (type != cte->type)
-                               NV_WARN(dev, " -> type 0x%02x\n", cte->type);
-               }
-
-       }
+u8 *
+dcb_conn(struct drm_device *dev, u8 idx)
+{
+       u8 *conntab = dcb_conntab(dev);
+       if (conntab && idx < conntab[2])
+               return conntab + conntab[1] + (idx * conntab[3]);
+       return NULL;
 }
 
 static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
@@ -6079,8 +5728,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
        entry->type = conn & 0xf;
        entry->i2c_index = (conn >> 4) & 0xf;
        entry->heads = (conn >> 8) & 0xf;
-       if (dcb->version >= 0x40)
-               entry->connector = (conn >> 12) & 0xf;
+       entry->connector = (conn >> 12) & 0xf;
        entry->bus = (conn >> 16) & 0xf;
        entry->location = (conn >> 20) & 0x3;
        entry->or = (conn >> 24) & 0xf;
@@ -6252,25 +5900,6 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
        return true;
 }
 
-static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
-                           uint32_t conn, uint32_t conf)
-{
-       struct dcb_entry *entry = new_dcb_entry(dcb);
-       bool ret;
-
-       if (dcb->version >= 0x20)
-               ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
-       else
-               ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
-       if (!ret)
-               return ret;
-
-       read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
-                          entry->i2c_index, &dcb->i2c[entry->i2c_index]);
-
-       return true;
-}
-
 static
 void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
 {
@@ -6431,154 +6060,118 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 #endif
 
        /* Make up some sane defaults */
-       fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+       fabricate_dcb_output(dcb, OUTPUT_ANALOG,
+                            bios->legacy.i2c_indices.crt, 1, 1);
 
        if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
-               fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+               fabricate_dcb_output(dcb, OUTPUT_TV,
+                                    bios->legacy.i2c_indices.tv,
                                     all_heads, 0);
 
        else if (bios->tmds.output0_script_ptr ||
                 bios->tmds.output1_script_ptr)
-               fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+               fabricate_dcb_output(dcb, OUTPUT_TMDS,
+                                    bios->legacy.i2c_indices.panel,
                                     all_heads, 1);
 }
 
 static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct dcb_table *dcb = &bios->dcb;
-       uint16_t dcbptr = 0, i2ctabptr = 0;
-       uint8_t *dcbtable;
-       uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
-       bool configblock = true;
-       int recordlength = 8, confofs = 4;
-       int i;
-
-       /* get the offset from 0x36 */
-       if (dev_priv->card_type > NV_04) {
-               dcbptr = ROM16(bios->data[0x36]);
-               if (dcbptr == 0x0000)
-                       NV_WARN(dev, "No output data (DCB) found in BIOS\n");
-       }
-
-       /* this situation likely means a really old card, pre DCB */
-       if (dcbptr == 0x0) {
-               fabricate_dcb_encoder_table(dev, bios);
-               return 0;
-       }
-
-       dcbtable = &bios->data[dcbptr];
-
-       /* get DCB version */
-       dcb->version = dcbtable[0];
-       NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
-                dcb->version >> 4, dcb->version & 0xf);
-
-       if (dcb->version >= 0x20) { /* NV17+ */
-               uint32_t sig;
+       struct dcb_table *dcb = &dev_priv->vbios.dcb;
+       u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
+       u32 conn = ROM32(outp[0]);
+       bool ret;
 
-               if (dcb->version >= 0x30) { /* NV40+ */
-                       headerlen = dcbtable[1];
-                       entries = dcbtable[2];
-                       recordlength = dcbtable[3];
-                       i2ctabptr = ROM16(dcbtable[4]);
-                       sig = ROM32(dcbtable[6]);
-                       dcb->gpio_table_ptr = ROM16(dcbtable[10]);
-                       dcb->connector_table_ptr = ROM16(dcbtable[20]);
-               } else {
-                       i2ctabptr = ROM16(dcbtable[2]);
-                       sig = ROM32(dcbtable[4]);
-                       headerlen = 8;
-               }
+       if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
+               struct dcb_entry *entry = new_dcb_entry(dcb);
 
-               if (sig != 0x4edcbdcb) {
-                       NV_ERROR(dev, "Bad Display Configuration Block "
-                                       "signature (%08X)\n", sig);
-                       return -EINVAL;
-               }
-       } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
-               char sig[8] = { 0 };
+               NV_TRACEWARN(dev, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
 
-               strncpy(sig, (char *)&dcbtable[-7], 7);
-               i2ctabptr = ROM16(dcbtable[2]);
-               recordlength = 10;
-               confofs = 6;
+               if (dcb->version >= 0x20)
+                       ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
+               else
+                       ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
+               if (!ret)
+                       return 1; /* stop parsing */
 
-               if (strcmp(sig, "DEV_REC")) {
-                       NV_ERROR(dev, "Bad Display Configuration Block "
-                                       "signature (%s)\n", sig);
-                       return -EINVAL;
-               }
-       } else {
-               /*
-                * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
-                * has the same single (crt) entry, even when tv-out present, so
-                * the conclusion is this version cannot really be used.
-                * v1.2 tables (some NV6/10, and NV15+) normally have the same
-                * 5 entries, which are not specific to the card and so no use.
-                * v1.2 does have an I2C table that read_dcb_i2c_table can
-                * handle, but cards exist (nv11 in #14821) with a bad i2c table
-                * pointer, so use the indices parsed in parse_bmp_structure.
-                * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+               /* Ignore the I2C index for on-chip TV-out, as there
+                * are cards with bogus values (nv31m in bug 23212),
+                * and it's otherwise useless.
                 */
-               NV_TRACEWARN(dev, "No useful information in BIOS output table; "
-                                 "adding all possible outputs\n");
-               fabricate_dcb_encoder_table(dev, bios);
-               return 0;
+               if (entry->type == OUTPUT_TV &&
+                   entry->location == DCB_LOC_ON_CHIP)
+                       entry->i2c_index = 0x0f;
        }
 
-       if (!i2ctabptr)
-               NV_WARN(dev, "No pointer to DCB I2C port table\n");
-       else {
-               dcb->i2c_table = &bios->data[i2ctabptr];
-               if (dcb->version >= 0x30)
-                       dcb->i2c_default_indices = dcb->i2c_table[4];
+       return 0;
+}
 
-               /*
-                * Parse the "management" I2C bus, used for hardware
-                * monitoring and some external TMDS transmitters.
-                */
-               if (dcb->version >= 0x22) {
-                       int idx = (dcb->version >= 0x40 ?
-                                  dcb->i2c_default_indices & 0xf :
-                                  2);
+static void
+dcb_fake_connectors(struct nvbios *bios)
+{
+       struct dcb_table *dcbt = &bios->dcb;
+       u8 map[16] = { };
+       int i, idx = 0;
 
-                       read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
-                                          idx, &dcb->i2c[idx]);
-               }
+       /* heuristic: if we ever get a non-zero connector field, assume
+        * that all the indices are valid and we don't need to fake them.
+        */
+       for (i = 0; i < dcbt->entries; i++) {
+               if (dcbt->entry[i].connector)
+                       return;
        }
 
-       if (entries > DCB_MAX_NUM_ENTRIES)
-               entries = DCB_MAX_NUM_ENTRIES;
-
-       for (i = 0; i < entries; i++) {
-               uint32_t connection, config = 0;
-
-               connection = ROM32(dcbtable[headerlen + recordlength * i]);
-               if (configblock)
-                       config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
-
-               /* seen on an NV11 with DCB v1.5 */
-               if (connection == 0x00000000)
-                       break;
+       /* no useful connector info is available, so we need to make it up
+        * ourselves.  the rule here is: anything on the same i2c bus
+        * is considered to be on the same connector.  any output
+        * without an associated i2c bus is assigned its own unique
+        * connector index.
+        */
+       for (i = 0; i < dcbt->entries; i++) {
+               u8 i2c = dcbt->entry[i].i2c_index;
+               if (i2c == 0x0f) {
+                       dcbt->entry[i].connector = idx++;
+               } else {
+                       if (!map[i2c])
+                               map[i2c] = ++idx;
+                       dcbt->entry[i].connector = map[i2c] - 1;
+               }
+       }
 
-               /* seen on an NV17 with DCB v2.0 */
-               if (connection == 0xffffffff)
-                       break;
+       /* if we created more than one connector, destroy the connector
+        * table - just in case it has random, rather than stub, entries.
+        */
+       if (i > 1) {
+               u8 *conntab = dcb_conntab(bios->dev);
+               if (conntab)
+                       conntab[0] = 0x00;
+       }
+}
 
-               if ((connection & 0x0000000f) == 0x0000000f)
-                       continue;
+static int
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+{
+       struct dcb_table *dcb = &bios->dcb;
+       u8 *dcbt, *conn;
+       int idx;
+
+       dcbt = dcb_table(dev);
+       if (!dcbt) {
+               /* handle pre-DCB boards */
+               if (bios->type == NVBIOS_BMP) {
+                       fabricate_dcb_encoder_table(dev, bios);
+                       return 0;
+               }
 
-               if (!apply_dcb_encoder_quirks(dev, i, &connection, &config))
-                       continue;
+               return -EINVAL;
+       }
 
-               NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
-                            dcb->entries, connection, config);
+       NV_TRACE(dev, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
 
-               if (!parse_dcb_entry(dev, dcb, connection, config))
-                       break;
-       }
+       dcb->version = dcbt[0];
+       dcb_outp_foreach(dev, NULL, parse_dcb_entry);
 
        /*
         * apart for v2.1+ not being known for requiring merging, this
@@ -6590,77 +6183,19 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
        if (!dcb->entries)
                return -ENXIO;
 
-       parse_dcb_gpio_table(bios);
-       parse_dcb_connector_table(bios);
-       return 0;
-}
-
-static void
-fixup_legacy_connector(struct nvbios *bios)
-{
-       struct dcb_table *dcb = &bios->dcb;
-       int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
-
-       /*
-        * DCB 3.0 also has the table in most cases, but there are some cards
-        * where the table is filled with stub entries, and the DCB entriy
-        * indices are all 0.  We don't need the connector indices on pre-G80
-        * chips (yet?) so limit the use to DCB 4.0 and above.
-        */
-       if (dcb->version >= 0x40)
-               return;
-
-       dcb->connector.entries = 0;
-
-       /*
-        * No known connector info before v3.0, so make it up.  the rule here
-        * is: anything on the same i2c bus is considered to be on the same
-        * connector.  any output without an associated i2c bus is assigned
-        * its own unique connector index.
-        */
-       for (i = 0; i < dcb->entries; i++) {
-               /*
-                * Ignore the I2C index for on-chip TV-out, as there
-                * are cards with bogus values (nv31m in bug 23212),
-                * and it's otherwise useless.
-                */
-               if (dcb->entry[i].type == OUTPUT_TV &&
-                   dcb->entry[i].location == DCB_LOC_ON_CHIP)
-                       dcb->entry[i].i2c_index = 0xf;
-               i2c = dcb->entry[i].i2c_index;
-
-               if (i2c_conn[i2c]) {
-                       dcb->entry[i].connector = i2c_conn[i2c] - 1;
-                       continue;
+       /* dump connector table entries to log, if any exist */
+       idx = -1;
+       while ((conn = dcb_conn(dev, ++idx))) {
+               if (conn[0] != 0xff) {
+                       NV_TRACE(dev, "DCB conn %02d: ", idx);
+                       if (dcb_conntab(dev)[3] < 4)
+                               printk("%04x\n", ROM16(conn[0]));
+                       else
+                               printk("%08x\n", ROM32(conn[0]));
                }
-
-               dcb->entry[i].connector = dcb->connector.entries++;
-               if (i2c != 0xf)
-                       i2c_conn[i2c] = dcb->connector.entries;
-       }
-
-       /* Fake the connector table as well as just connector indices */
-       for (i = 0; i < dcb->connector.entries; i++) {
-               dcb->connector.entry[i].index = i;
-               dcb->connector.entry[i].type = divine_connector_type(bios, i);
-               dcb->connector.entry[i].gpio_tag = 0xff;
-       }
-}
-
-static void
-fixup_legacy_i2c(struct nvbios *bios)
-{
-       struct dcb_table *dcb = &bios->dcb;
-       int i;
-
-       for (i = 0; i < dcb->entries; i++) {
-               if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
-                       dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
-               if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
-                       dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
-               if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
-                       dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
        }
+       dcb_fake_connectors(bios);
+       return 0;
 }
 
 static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
@@ -6879,19 +6414,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
        return ret;
 }
 
-static void
-nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nvbios *bios = &dev_priv->vbios;
-       struct dcb_i2c_entry *entry;
-       int i;
-
-       entry = &bios->dcb.i2c[0];
-       for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
-               nouveau_i2c_fini(dev, entry);
-}
-
 static bool
 nouveau_bios_posted(struct drm_device *dev)
 {
@@ -6928,12 +6450,17 @@ nouveau_bios_init(struct drm_device *dev)
        if (ret)
                return ret;
 
-       ret = parse_dcb_table(dev, bios);
+       ret = nouveau_i2c_init(dev);
        if (ret)
                return ret;
 
-       fixup_legacy_i2c(bios);
-       fixup_legacy_connector(bios);
+       ret = nouveau_mxm_init(dev);
+       if (ret)
+               return ret;
+
+       ret = parse_dcb_table(dev, bios);
+       if (ret)
+               return ret;
 
        if (!bios->major_version)       /* we don't run version 0 bios */
                return 0;
@@ -6971,5 +6498,6 @@ nouveau_bios_init(struct drm_device *dev)
 void
 nouveau_bios_takedown(struct drm_device *dev)
 {
-       nouveau_bios_i2c_devices_takedown(dev);
+       nouveau_mxm_fini(dev);
+       nouveau_i2c_fini(dev);
 }
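An aside on dcb_fake_connectors() earlier in this file's diff: when no connector indices are present, outputs sharing an i2c bus are folded onto one connector and bus-less outputs (i2c index 0x0f) each get their own. A standalone sketch of just that mapping, using invented example data; it is not part of the patch.

/* Hypothetical illustration of the dcb_fake_connectors() mapping above.
 * Entries sharing an i2c bus share a connector index; bus-less (0x0f)
 * outputs each get a unique index.  Data below is made up. */
#include <stdio.h>

int main(void)
{
        unsigned char i2c_index[4] = { 0x00, 0x0f, 0x00, 0x02 };
        unsigned char connector[4];
        unsigned char map[16] = { 0 };
        int i, idx = 0;

        for (i = 0; i < 4; i++) {
                unsigned char i2c = i2c_index[i];
                if (i2c == 0x0f) {
                        connector[i] = idx++;
                } else {
                        if (!map[i2c])
                                map[i2c] = ++idx;
                        connector[i] = map[i2c] - 1;
                }
        }

        for (i = 0; i < 4; i++)
                printf("output %d -> connector %d\n", i, connector[i]);
        return 0;
}

Here outputs 0 and 2 share i2c bus 0 and end up on connector 0, the bus-less output 1 gets its own connector, and output 3 on bus 2 gets another.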
index 8adb69e..1e382ad 100644
 
 #define DCB_LOC_ON_CHIP 0
 
-#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
-#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
-#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
+#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
+#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
+#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROMPTR(d,x) ({            \
+       struct drm_nouveau_private *dev_priv = (d)->dev_private; \
+       ROM16(x) ? &dev_priv->vbios.data[ROM16(x)] : NULL; \
+})
 
 struct bit_entry {
        uint8_t  id;
@@ -48,30 +53,12 @@ struct bit_entry {
 
 int bit_table(struct drm_device *, u8 id, struct bit_entry *);
 
-struct dcb_i2c_entry {
-       uint32_t entry;
-       uint8_t port_type;
-       uint8_t read, write;
-       struct nouveau_i2c_chan *chan;
-};
-
 enum dcb_gpio_tag {
        DCB_GPIO_TVDAC0 = 0xc,
        DCB_GPIO_TVDAC1 = 0x2d,
-};
-
-struct dcb_gpio_entry {
-       enum dcb_gpio_tag tag;
-       int line;
-       bool invert;
-       uint32_t entry;
-       uint8_t state_default;
-       uint8_t state[2];
-};
-
-struct dcb_gpio_table {
-       int entries;
-       struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
+       DCB_GPIO_PWM_FAN = 0x9,
+       DCB_GPIO_FAN_SENSE = 0x3d,
+       DCB_GPIO_UNUSED = 0xff
 };
 
 enum dcb_connector_type {
@@ -90,20 +77,6 @@ enum dcb_connector_type {
        DCB_CONNECTOR_NONE = 0xff
 };
 
-struct dcb_connector_table_entry {
-       uint8_t index;
-       uint32_t entry;
-       enum dcb_connector_type type;
-       uint8_t index2;
-       uint8_t gpio_tag;
-       void *drm;
-};
-
-struct dcb_connector_table {
-       int entries;
-       struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
-};
-
 enum dcb_type {
        OUTPUT_ANALOG = 0,
        OUTPUT_TV = 1,
@@ -111,6 +84,7 @@ enum dcb_type {
        OUTPUT_LVDS = 3,
        OUTPUT_DP = 6,
        OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
+       OUTPUT_UNUSED = 15,
        OUTPUT_ANY = -1
 };
 
@@ -155,18 +129,8 @@ struct dcb_entry {
 
 struct dcb_table {
        uint8_t version;
-
        int entries;
        struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
-
-       uint8_t *i2c_table;
-       uint8_t i2c_default_indices;
-       struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
-
-       uint16_t gpio_table_ptr;
-       struct dcb_gpio_table gpio;
-       uint16_t connector_table_ptr;
-       struct dcb_connector_table connector;
 };
 
 enum nouveau_or {
@@ -195,7 +159,7 @@ enum pll_types {
        PLL_SHADER = 0x02,
        PLL_UNK03  = 0x03,
        PLL_MEMORY = 0x04,
-       PLL_UNK05  = 0x05,
+       PLL_VDEC   = 0x05,
        PLL_UNK40  = 0x40,
        PLL_UNK41  = 0x41,
        PLL_UNK42  = 0x42,
@@ -333,4 +297,11 @@ struct nvbios {
        } legacy;
 };
 
+void *dcb_table(struct drm_device *);
+void *dcb_outp(struct drm_device *, u8 idx);
+int dcb_outp_foreach(struct drm_device *, void *data,
+                    int (*)(struct drm_device *, void *, int idx, u8 *outp));
+u8 *dcb_conntab(struct drm_device *);
+u8 *dcb_conn(struct drm_device *, u8 idx);
+
 #endif
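The accessors declared above replace the old pre-parsed gpio/connector/i2c tables with on-demand walks of the raw DCB. Below is a hedged sketch of how a caller might use dcb_outp_foreach(); the names count_dcb_outputs and count_one are invented for illustration, and the callback's shape simply mirrors parse_dcb_entry() in nouveau_bios.c.

/* Hedged sketch only, assuming the usual nouveau includes ("drmP.h",
 * "nouveau_drv.h").  Counts DCB output entries via the iterator above;
 * the function names are made up and not part of the patch. */
static int
count_one(struct drm_device *dev, void *data, int idx, u8 *outp)
{
        int *count = data;

        /* outp points at the raw entry; outp[0] & 0x0f is the output type */
        (*count)++;
        return 0;       /* returning non-zero stops the walk early */
}

static int
count_dcb_outputs(struct drm_device *dev)
{
        int count = 0;

        dcb_outp_foreach(dev, &count, count_one);
        return count;
}

Per the dcb_outp_foreach() implementation in nouveau_bios.c, the iterator skips OUTPUT_UNUSED entries, stops at OUTPUT_EOL or all-zero/all-ones markers, and returns the callback's value if it is non-zero, otherwise 0.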
index 7cc37e6..724b41a 100644
@@ -28,6 +28,7 @@
  */
 
 #include "drmP.h"
+#include "ttm/ttm_page_alloc.h"
 
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
@@ -92,6 +93,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
+       size_t acc_size;
        int ret;
 
        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
@@ -114,9 +116,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);
 
+       acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
+                                      sizeof(struct nouveau_bo));
+
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement,
-                         align >> PAGE_SHIFT, 0, false, NULL, size,
+                         align >> PAGE_SHIFT, 0, false, NULL, acc_size,
                          nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -343,8 +348,10 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
                *mem = val;
 }
 
-static struct ttm_backend *
-nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+static struct ttm_tt *
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
+                     unsigned long size, uint32_t page_flags,
+                     struct page *dummy_read_page)
 {
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;
@@ -352,11 +359,13 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
        switch (dev_priv->gart_info.type) {
 #if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
-               return ttm_agp_backend_init(bdev, dev->agp->bridge);
+               return ttm_agp_tt_create(bdev, dev->agp->bridge,
+                                        size, page_flags, dummy_read_page);
 #endif
        case NOUVEAU_GART_PDMA:
        case NOUVEAU_GART_HW:
-               return nouveau_sgdma_init_ttm(dev);
+               return nouveau_sgdma_create_ttm(bdev, size, page_flags,
+                                               dummy_read_page);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
@@ -673,8 +682,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
        if (mem->mem_type == TTM_PL_VRAM)
                nouveau_vm_map(vma, node);
        else
-               nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
-                                 node, node->pages);
+               nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
 
        return 0;
 }
@@ -801,19 +809,18 @@ out:
 static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 {
-       struct nouveau_mem *node = new_mem->mm_node;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_vma *vma;
 
        list_for_each_entry(vma, &nvbo->vma_list, head) {
-               if (new_mem->mem_type == TTM_PL_VRAM) {
+               if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
                        nouveau_vm_map(vma, new_mem->mm_node);
                } else
-               if (new_mem->mem_type == TTM_PL_TT &&
+               if (new_mem && new_mem->mem_type == TTM_PL_TT &&
                    nvbo->page_shift == vma->vm->spg_shift) {
                        nouveau_vm_map_sg(vma, 0, new_mem->
                                          num_pages << PAGE_SHIFT,
-                                         node, node->pages);
+                                         new_mem->mm_node);
                } else {
                        nouveau_vm_unmap(vma);
                }
@@ -1044,8 +1051,94 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
        nouveau_fence_unref(&old_fence);
 }
 
+static int
+nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+{
+       struct ttm_dma_tt *ttm_dma = (void *)ttm;
+       struct drm_nouveau_private *dev_priv;
+       struct drm_device *dev;
+       unsigned i;
+       int r;
+
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       dev_priv = nouveau_bdev(ttm->bdev);
+       dev = dev_priv->dev;
+
+#if __OS_HAS_AGP
+       if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+               return ttm_agp_tt_populate(ttm);
+       }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+       if (swiotlb_nr_tbl()) {
+               return ttm_dma_populate((void *)ttm, dev->dev);
+       }
+#endif
+
+       r = ttm_pool_populate(ttm);
+       if (r) {
+               return r;
+       }
+
+       for (i = 0; i < ttm->num_pages; i++) {
+               ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+                                                  0, PAGE_SIZE,
+                                                  PCI_DMA_BIDIRECTIONAL);
+               if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
+                       while (i--) {
+                               pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+                               ttm_dma->dma_address[i] = 0;
+                       }
+                       ttm_pool_unpopulate(ttm);
+                       return -EFAULT;
+               }
+       }
+       return 0;
+}
+
+static void
+nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+       struct ttm_dma_tt *ttm_dma = (void *)ttm;
+       struct drm_nouveau_private *dev_priv;
+       struct drm_device *dev;
+       unsigned i;
+
+       dev_priv = nouveau_bdev(ttm->bdev);
+       dev = dev_priv->dev;
+
+#if __OS_HAS_AGP
+       if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+               ttm_agp_tt_unpopulate(ttm);
+               return;
+       }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+       if (swiotlb_nr_tbl()) {
+               ttm_dma_unpopulate((void *)ttm, dev->dev);
+               return;
+       }
+#endif
+
+       for (i = 0; i < ttm->num_pages; i++) {
+               if (ttm_dma->dma_address[i]) {
+                       pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+                                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+               }
+       }
+
+       ttm_pool_unpopulate(ttm);
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
-       .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+       .ttm_tt_create = &nouveau_ttm_tt_create,
+       .ttm_tt_populate = &nouveau_ttm_tt_populate,
+       .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
@@ -1091,7 +1184,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
                nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
        else
        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-               nouveau_vm_map_sg(vma, 0, size, node, node->pages);
+               nouveau_vm_map_sg(vma, 0, size, node);
 
        list_add_tail(&vma->head, &nvbo->vma_list);
        vma->refcount = 1;
index bb6ec9e..a018def 100644 (file)
@@ -187,6 +187,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
        nouveau_dma_pre_init(chan);
        chan->user_put = 0x40;
        chan->user_get = 0x44;
+       if (dev_priv->card_type >= NV_50)
+               chan->user_get_hi = 0x60;
 
        /* disable the fifo caches */
        pfifo->reassign(dev, false);
index cea6696..f3ce34b 100644 (file)
@@ -35,6 +35,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
 #include "nouveau_connector.h"
+#include "nouveau_gpio.h"
 #include "nouveau_hw.h"
 
 static void nouveau_connector_hotplug(void *, int);
@@ -78,29 +79,11 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
        return NULL;
 }
 
-/*TODO: This could use improvement, and learn to handle the fixed
- *      BIOS tables etc.  It's fine currently, for its only user.
- */
-int
-nouveau_connector_bpp(struct drm_connector *connector)
-{
-       struct nouveau_connector *nv_connector = nouveau_connector(connector);
-
-       if (nv_connector->edid && nv_connector->edid->revision >= 4) {
-               u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
-               if (bpc > 4)
-                       return bpc;
-       }
-
-       return 18;
-}
-
 static void
 nouveau_connector_destroy(struct drm_connector *connector)
 {
        struct nouveau_connector *nv_connector = nouveau_connector(connector);
        struct drm_nouveau_private *dev_priv;
-       struct nouveau_gpio_engine *pgpio;
        struct drm_device *dev;
 
        if (!nv_connector)
@@ -110,10 +93,9 @@ nouveau_connector_destroy(struct drm_connector *connector)
        dev_priv = dev->dev_private;
        NV_DEBUG_KMS(dev, "\n");
 
-       pgpio = &dev_priv->engine.gpio;
-       if (pgpio->irq_unregister) {
-               pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
-                                     nouveau_connector_hotplug, connector);
+       if (nv_connector->hpd != DCB_GPIO_UNUSED) {
+               nouveau_gpio_isr_del(dev, 0, nv_connector->hpd, 0xff,
+                                    nouveau_connector_hotplug, connector);
        }
 
        kfree(nv_connector->edid);
@@ -198,6 +180,10 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
                return;
        nv_connector->detected_encoder = nv_encoder;
 
+       if (dev_priv->card_type >= NV_50) {
+               connector->interlace_allowed = true;
+               connector->doublescan_allowed = true;
+       } else
        if (nv_encoder->dcb->type == OUTPUT_LVDS ||
            nv_encoder->dcb->type == OUTPUT_TMDS) {
                connector->doublescan_allowed = false;
@@ -214,7 +200,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
                        connector->interlace_allowed = true;
        }
 
-       if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+       if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
                drm_connector_property_set_value(connector,
                        dev->mode_config.dvi_i_subconnector_property,
                        nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -397,7 +383,7 @@ nouveau_connector_force(struct drm_connector *connector)
        struct nouveau_encoder *nv_encoder;
        int type;
 
-       if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+       if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
                if (connector->force == DRM_FORCE_ON_DIGITAL)
                        type = OUTPUT_TMDS;
                else
@@ -420,15 +406,21 @@ static int
 nouveau_connector_set_property(struct drm_connector *connector,
                               struct drm_property *property, uint64_t value)
 {
+       struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+       struct nouveau_display_engine *disp = &dev_priv->engine.display;
        struct nouveau_connector *nv_connector = nouveau_connector(connector);
        struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
        struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
        struct drm_device *dev = connector->dev;
+       struct nouveau_crtc *nv_crtc;
        int ret;
 
+       nv_crtc = NULL;
+       if (connector->encoder && connector->encoder->crtc)
+               nv_crtc = nouveau_crtc(connector->encoder->crtc);
+
        /* Scaling mode */
        if (property == dev->mode_config.scaling_mode_property) {
-               struct nouveau_crtc *nv_crtc = NULL;
                bool modeset = false;
 
                switch (value) {
@@ -454,8 +446,6 @@ nouveau_connector_set_property(struct drm_connector *connector,
                        modeset = true;
                nv_connector->scaling_mode = value;
 
-               if (connector->encoder && connector->encoder->crtc)
-                       nv_crtc = nouveau_crtc(connector->encoder->crtc);
                if (!nv_crtc)
                        return 0;
 
@@ -467,7 +457,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
                        if (!ret)
                                return -EINVAL;
                } else {
-                       ret = nv_crtc->set_scale(nv_crtc, value, true);
+                       ret = nv_crtc->set_scale(nv_crtc, true);
                        if (ret)
                                return ret;
                }
@@ -475,23 +465,58 @@ nouveau_connector_set_property(struct drm_connector *connector,
                return 0;
        }
 
-       /* Dithering */
-       if (property == dev->mode_config.dithering_mode_property) {
-               struct nouveau_crtc *nv_crtc = NULL;
+       /* Underscan */
+       if (property == disp->underscan_property) {
+               if (nv_connector->underscan != value) {
+                       nv_connector->underscan = value;
+                       if (!nv_crtc || !nv_crtc->set_scale)
+                               return 0;
 
-               if (value == DRM_MODE_DITHERING_ON)
-                       nv_connector->use_dithering = true;
-               else
-                       nv_connector->use_dithering = false;
+                       return nv_crtc->set_scale(nv_crtc, true);
+               }
+
+               return 0;
+       }
+
+       if (property == disp->underscan_hborder_property) {
+               if (nv_connector->underscan_hborder != value) {
+                       nv_connector->underscan_hborder = value;
+                       if (!nv_crtc || !nv_crtc->set_scale)
+                               return 0;
+
+                       return nv_crtc->set_scale(nv_crtc, true);
+               }
+
+               return 0;
+       }
+
+       if (property == disp->underscan_vborder_property) {
+               if (nv_connector->underscan_vborder != value) {
+                       nv_connector->underscan_vborder = value;
+                       if (!nv_crtc || !nv_crtc->set_scale)
+                               return 0;
+
+                       return nv_crtc->set_scale(nv_crtc, true);
+               }
+
+               return 0;
+       }
+
+       /* Dithering */
+       if (property == disp->dithering_mode) {
+               nv_connector->dithering_mode = value;
+               if (!nv_crtc || !nv_crtc->set_dither)
+                       return 0;
 
-               if (connector->encoder && connector->encoder->crtc)
-                       nv_crtc = nouveau_crtc(connector->encoder->crtc);
+               return nv_crtc->set_dither(nv_crtc, true);
+       }
 
+       if (property == disp->dithering_depth) {
+               nv_connector->dithering_depth = value;
                if (!nv_crtc || !nv_crtc->set_dither)
                        return 0;
 
-               return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
-                                          true);
+               return nv_crtc->set_dither(nv_crtc, true);
        }
 
        if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
@@ -602,6 +627,46 @@ nouveau_connector_scaler_modes_add(struct drm_connector *connector)
        return modes;
 }
 
+static void
+nouveau_connector_detect_depth(struct drm_connector *connector)
+{
+       struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+       struct nouveau_connector *nv_connector = nouveau_connector(connector);
+       struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+       struct nvbios *bios = &dev_priv->vbios;
+       struct drm_display_mode *mode = nv_connector->native_mode;
+       bool duallink;
+
+       /* if the edid is feeling nice enough to provide this info, use it */
+       if (nv_connector->edid && connector->display_info.bpc)
+               return;
+
+       /* if not, we're out of options unless we're LVDS, default to 6bpc */
+       connector->display_info.bpc = 6;
+       if (nv_encoder->dcb->type != OUTPUT_LVDS)
+               return;
+
+       /* LVDS: panel straps */
+       if (bios->fp_no_ddc) {
+               if (bios->fp.if_is_24bit)
+                       connector->display_info.bpc = 8;
+               return;
+       }
+
+       /* LVDS: DDC panel, need to first determine the number of links to
+        * know which if_is_24bit flag to check...
+        */
+       if (nv_connector->edid &&
+           nv_connector->type == DCB_CONNECTOR_LVDS_SPWG)
+               duallink = ((u8 *)nv_connector->edid)[121] == 2;
+       else
+               duallink = mode->clock >= bios->fp.duallink_transition_clk;
+
+       if ((!duallink && (bios->fp.strapless_is_24bit & 1)) ||
+           ( duallink && (bios->fp.strapless_is_24bit & 2)))
+               connector->display_info.bpc = 8;
+}
+
 static int
 nouveau_connector_get_modes(struct drm_connector *connector)
 {
@@ -631,6 +696,12 @@ nouveau_connector_get_modes(struct drm_connector *connector)
                nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
        }
 
+       /* Determine display colour depth for everything except LVDS now,
+        * DP requires this before mode_valid() is called.
+        */
+       if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+               nouveau_connector_detect_depth(connector);
+
        /* Find the native mode if this is a digital panel, if we didn't
         * find any modes through DDC previously add the native mode to
         * the list of modes.
@@ -646,12 +717,19 @@ nouveau_connector_get_modes(struct drm_connector *connector)
                ret = 1;
        }
 
+       /* Determine LVDS colour depth, must happen after determining
+        * "native" mode as some VBIOS tables require us to use the
+        * pixel clock as part of the lookup...
+        */
+       if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+               nouveau_connector_detect_depth(connector);
+
        if (nv_encoder->dcb->type == OUTPUT_TV)
                ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
 
-       if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS ||
-           nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG ||
-           nv_connector->dcb->type == DCB_CONNECTOR_eDP)
+       if (nv_connector->type == DCB_CONNECTOR_LVDS ||
+           nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
+           nv_connector->type == DCB_CONNECTOR_eDP)
                ret += nouveau_connector_scaler_modes_add(connector);
 
        return ret;
@@ -710,7 +788,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
        case OUTPUT_DP:
                max_clock  = nv_encoder->dp.link_nr;
                max_clock *= nv_encoder->dp.link_bw;
-               clock = clock * nouveau_connector_bpp(connector) / 10;
+               clock = clock * (connector->display_info.bpc * 3) / 10;
                break;
        default:
                BUG_ON(1);
@@ -768,96 +846,175 @@ nouveau_connector_funcs_lvds = {
        .force = nouveau_connector_force
 };
 
+static int
+drm_conntype_from_dcb(enum dcb_connector_type dcb)
+{
+       switch (dcb) {
+       case DCB_CONNECTOR_VGA      : return DRM_MODE_CONNECTOR_VGA;
+       case DCB_CONNECTOR_TV_0     :
+       case DCB_CONNECTOR_TV_1     :
+       case DCB_CONNECTOR_TV_3     : return DRM_MODE_CONNECTOR_TV;
+       case DCB_CONNECTOR_DVI_I    : return DRM_MODE_CONNECTOR_DVII;
+       case DCB_CONNECTOR_DVI_D    : return DRM_MODE_CONNECTOR_DVID;
+       case DCB_CONNECTOR_LVDS     :
+       case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
+       case DCB_CONNECTOR_DP       : return DRM_MODE_CONNECTOR_DisplayPort;
+       case DCB_CONNECTOR_eDP      : return DRM_MODE_CONNECTOR_eDP;
+       case DCB_CONNECTOR_HDMI_0   :
+       case DCB_CONNECTOR_HDMI_1   : return DRM_MODE_CONNECTOR_HDMIA;
+       default:
+               break;
+       }
+
+       return DRM_MODE_CONNECTOR_Unknown;
+}
+
 struct drm_connector *
 nouveau_connector_create(struct drm_device *dev, int index)
 {
        const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       struct nouveau_display_engine *disp = &dev_priv->engine.display;
        struct nouveau_connector *nv_connector = NULL;
-       struct dcb_connector_table_entry *dcb = NULL;
        struct drm_connector *connector;
        int type, ret = 0;
+       bool dummy;
 
        NV_DEBUG_KMS(dev, "\n");
 
-       if (index >= dev_priv->vbios.dcb.connector.entries)
-               return ERR_PTR(-EINVAL);
-
-       dcb = &dev_priv->vbios.dcb.connector.entry[index];
-       if (dcb->drm)
-               return dcb->drm;
-
-       switch (dcb->type) {
-       case DCB_CONNECTOR_VGA:
-               type = DRM_MODE_CONNECTOR_VGA;
-               break;
-       case DCB_CONNECTOR_TV_0:
-       case DCB_CONNECTOR_TV_1:
-       case DCB_CONNECTOR_TV_3:
-               type = DRM_MODE_CONNECTOR_TV;
-               break;
-       case DCB_CONNECTOR_DVI_I:
-               type = DRM_MODE_CONNECTOR_DVII;
-               break;
-       case DCB_CONNECTOR_DVI_D:
-               type = DRM_MODE_CONNECTOR_DVID;
-               break;
-       case DCB_CONNECTOR_HDMI_0:
-       case DCB_CONNECTOR_HDMI_1:
-               type = DRM_MODE_CONNECTOR_HDMIA;
-               break;
-       case DCB_CONNECTOR_LVDS:
-       case DCB_CONNECTOR_LVDS_SPWG:
-               type = DRM_MODE_CONNECTOR_LVDS;
-               funcs = &nouveau_connector_funcs_lvds;
-               break;
-       case DCB_CONNECTOR_DP:
-               type = DRM_MODE_CONNECTOR_DisplayPort;
-               break;
-       case DCB_CONNECTOR_eDP:
-               type = DRM_MODE_CONNECTOR_eDP;
-               break;
-       default:
-               NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
-               return ERR_PTR(-EINVAL);
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               nv_connector = nouveau_connector(connector);
+               if (nv_connector->index == index)
+                       return connector;
        }
 
        nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
        if (!nv_connector)
                return ERR_PTR(-ENOMEM);
-       nv_connector->dcb = dcb;
+
        connector = &nv_connector->base;
+       nv_connector->index = index;
+
+       /* attempt to parse vbios connector type and hotplug gpio */
+       nv_connector->dcb = dcb_conn(dev, index);
+       if (nv_connector->dcb) {
+               static const u8 hpd[16] = {
+                       0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+               };
+
+               u32 entry = ROM16(nv_connector->dcb[0]);
+               if (dcb_conntab(dev)[3] >= 4)
+                       entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
+
+               nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
+               nv_connector->hpd = hpd[nv_connector->hpd];
+
+               nv_connector->type = nv_connector->dcb[0];
+               if (drm_conntype_from_dcb(nv_connector->type) ==
+                                         DRM_MODE_CONNECTOR_Unknown) {
+                       NV_WARN(dev, "unknown connector type %02x\n",
+                               nv_connector->type);
+                       nv_connector->type = DCB_CONNECTOR_NONE;
+               }
 
-       /* defaults, will get overridden in detect() */
-       connector->interlace_allowed = false;
-       connector->doublescan_allowed = false;
+               /* Gigabyte NX85T */
+               if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
+                       if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
+                               nv_connector->type = DCB_CONNECTOR_DVI_I;
+               }
 
-       drm_connector_init(dev, connector, funcs, type);
-       drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+               /* Gigabyte GV-NX86T512H */
+               if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
+                       if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
+                               nv_connector->type = DCB_CONNECTOR_DVI_I;
+               }
+       } else {
+               nv_connector->type = DCB_CONNECTOR_NONE;
+               nv_connector->hpd = DCB_GPIO_UNUSED;
+       }
+
+       /* no vbios data, or an unknown dcb connector type - attempt to
+        * figure out something suitable ourselves
+        */
+       if (nv_connector->type == DCB_CONNECTOR_NONE) {
+               struct drm_nouveau_private *dev_priv = dev->dev_private;
+               struct dcb_table *dcbt = &dev_priv->vbios.dcb;
+               u32 encoders = 0;
+               int i;
+
+               for (i = 0; i < dcbt->entries; i++) {
+                       if (dcbt->entry[i].connector == nv_connector->index)
+                               encoders |= (1 << dcbt->entry[i].type);
+               }
 
-       /* Check if we need dithering enabled */
-       if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-               bool dummy, is_24bit = false;
+               if (encoders & (1 << OUTPUT_DP)) {
+                       if (encoders & (1 << OUTPUT_TMDS))
+                               nv_connector->type = DCB_CONNECTOR_DP;
+                       else
+                               nv_connector->type = DCB_CONNECTOR_eDP;
+               } else
+               if (encoders & (1 << OUTPUT_TMDS)) {
+                       if (encoders & (1 << OUTPUT_ANALOG))
+                               nv_connector->type = DCB_CONNECTOR_DVI_I;
+                       else
+                               nv_connector->type = DCB_CONNECTOR_DVI_D;
+               } else
+               if (encoders & (1 << OUTPUT_ANALOG)) {
+                       nv_connector->type = DCB_CONNECTOR_VGA;
+               } else
+               if (encoders & (1 << OUTPUT_LVDS)) {
+                       nv_connector->type = DCB_CONNECTOR_LVDS;
+               } else
+               if (encoders & (1 << OUTPUT_TV)) {
+                       nv_connector->type = DCB_CONNECTOR_TV_0;
+               }
+       }
 
-               ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit);
+       type = drm_conntype_from_dcb(nv_connector->type);
+       if (type == DRM_MODE_CONNECTOR_LVDS) {
+               ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
                if (ret) {
-                       NV_ERROR(dev, "Error parsing LVDS table, disabling "
-                                "LVDS\n");
-                       goto fail;
+                       NV_ERROR(dev, "Error parsing LVDS table, disabling\n");
+                       kfree(nv_connector);
+                       return ERR_PTR(ret);
                }
 
-               nv_connector->use_dithering = !is_24bit;
+               funcs = &nouveau_connector_funcs_lvds;
+       } else {
+               funcs = &nouveau_connector_funcs;
        }
 
+       /* defaults, will get overridden in detect() */
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+
+       drm_connector_init(dev, connector, funcs, type);
+       drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+
        /* Init DVI-I specific properties */
-       if (dcb->type == DCB_CONNECTOR_DVI_I) {
-               drm_mode_create_dvi_i_properties(dev);
+       if (nv_connector->type == DCB_CONNECTOR_DVI_I)
                drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
-               drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
+
+       /* Add overscan compensation options to digital outputs */
+       if (disp->underscan_property &&
+           (nv_connector->type == DCB_CONNECTOR_DVI_D ||
+            nv_connector->type == DCB_CONNECTOR_DVI_I ||
+            nv_connector->type == DCB_CONNECTOR_HDMI_0 ||
+            nv_connector->type == DCB_CONNECTOR_HDMI_1 ||
+            nv_connector->type == DCB_CONNECTOR_DP)) {
+               drm_connector_attach_property(connector,
+                                             disp->underscan_property,
+                                             UNDERSCAN_OFF);
+               drm_connector_attach_property(connector,
+                                             disp->underscan_hborder_property,
+                                             0);
+               drm_connector_attach_property(connector,
+                                             disp->underscan_vborder_property,
+                                             0);
        }
 
-       switch (dcb->type) {
+       switch (nv_connector->type) {
        case DCB_CONNECTOR_VGA:
                if (dev_priv->card_type >= NV_50) {
                        drm_connector_attach_property(connector,
@@ -876,32 +1033,32 @@ nouveau_connector_create(struct drm_device *dev, int index)
                drm_connector_attach_property(connector,
                                dev->mode_config.scaling_mode_property,
                                nv_connector->scaling_mode);
-               drm_connector_attach_property(connector,
-                               dev->mode_config.dithering_mode_property,
-                               nv_connector->use_dithering ?
-                               DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+               if (disp->dithering_mode) {
+                       nv_connector->dithering_mode = DITHERING_MODE_AUTO;
+                       drm_connector_attach_property(connector,
+                                               disp->dithering_mode,
+                                               nv_connector->dithering_mode);
+               }
+               if (disp->dithering_depth) {
+                       nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
+                       drm_connector_attach_property(connector,
+                                               disp->dithering_depth,
+                                               nv_connector->dithering_depth);
+               }
                break;
        }
 
-       if (nv_connector->dcb->gpio_tag != 0xff && pgpio->irq_register) {
-               pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
-                                   nouveau_connector_hotplug, connector);
-
-               connector->polled = DRM_CONNECTOR_POLL_HPD;
-       } else {
-               connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+       if (nv_connector->hpd != DCB_GPIO_UNUSED) {
+               ret = nouveau_gpio_isr_add(dev, 0, nv_connector->hpd, 0xff,
+                                          nouveau_connector_hotplug,
+                                          connector);
+               if (ret == 0)
+                       connector->polled = DRM_CONNECTOR_POLL_HPD;
        }
 
        drm_sysfs_connector_add(connector);
-
-       dcb->drm = connector;
-       return dcb->drm;
-
-fail:
-       drm_connector_cleanup(connector);
-       kfree(connector);
-       return ERR_PTR(ret);
-
+       return connector;
 }
 
 static void
index 711b1e9..e485702 100644 (file)
 #include "drm_edid.h"
 #include "nouveau_i2c.h"
 
+enum nouveau_underscan_type {
+       UNDERSCAN_OFF,
+       UNDERSCAN_ON,
+       UNDERSCAN_AUTO,
+};
+
+/* the enum values specifically defined here match nv50/nvd0 hw values, and
+ * the code relies on this
+ */
+enum nouveau_dithering_mode {
+       DITHERING_MODE_OFF = 0x00,
+       DITHERING_MODE_ON = 0x01,
+       DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+       DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+       DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+       DITHERING_MODE_AUTO
+};
+
+enum nouveau_dithering_depth {
+       DITHERING_DEPTH_6BPC = 0x00,
+       DITHERING_DEPTH_8BPC = 0x02,
+       DITHERING_DEPTH_AUTO
+};
+
 struct nouveau_connector {
        struct drm_connector base;
+       enum dcb_connector_type type;
+       u8 index;
+       u8 *dcb;
+       u8 hpd;
 
-       struct dcb_connector_table_entry *dcb;
-
+       int dithering_mode;
+       int dithering_depth;
        int scaling_mode;
-       bool use_dithering;
+       enum nouveau_underscan_type underscan;
+       u32 underscan_hborder;
+       u32 underscan_vborder;
 
        struct nouveau_encoder *detected_encoder;
        struct edid *edid;
index bf8e128..686f6b4 100644 (file)
@@ -32,8 +32,6 @@ struct nouveau_crtc {
 
        int index;
 
-       struct drm_display_mode *mode;
-
        uint32_t dpms_saved_fp_control;
        uint32_t fp_users;
        int saturation;
@@ -67,8 +65,8 @@ struct nouveau_crtc {
                int depth;
        } lut;
 
-       int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
-       int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
+       int (*set_dither)(struct nouveau_crtc *crtc, bool update);
+       int (*set_scale)(struct nouveau_crtc *crtc, bool update);
 };
 
 static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
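
A rough sketch of what the slimmed-down callback signatures imply: the per-CRTC hooks now look the requested value up on the attached connector instead of taking it as an argument. nouveau_crtc_connector_get() is assumed to be the existing inline helper, and the body is illustrative only:

/* Hypothetical set_dither() implementation following the new signature. */
static int
example_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
        struct nouveau_connector *nv_connector =
                nouveau_crtc_connector_get(nv_crtc);
        int mode = DITHERING_MODE_OFF;

        if (nv_connector)
                mode = nv_connector->dithering_mode;

        /* ... program the dither mode for this head, and if 'update' is
         * set, kick the hardware to apply it immediately ...
         */
        return 0;
}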
index 8e15923..fa2ec49 100644 (file)
@@ -44,7 +44,7 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
        seq_printf(m, "channel id    : %d\n", chan->id);
 
        seq_printf(m, "cpu fifo state:\n");
-       seq_printf(m, "          base: 0x%08x\n", chan->pushbuf_base);
+       seq_printf(m, "          base: 0x%010llx\n", chan->pushbuf_base);
        seq_printf(m, "           max: 0x%08x\n", chan->dma.max << 2);
        seq_printf(m, "           cur: 0x%08x\n", chan->dma.cur << 2);
        seq_printf(m, "           put: 0x%08x\n", chan->dma.put << 2);
@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
        { "memory", nouveau_debugfs_memory_info, 0, NULL },
        { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
        { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
+       { "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
 };
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
 
index b12fd2c..3cb52bc 100644 (file)
@@ -32,6 +32,8 @@
 #include "nouveau_hw.h"
 #include "nouveau_crtc.h"
 #include "nouveau_dma.h"
+#include "nouveau_connector.h"
+#include "nouveau_gpio.h"
 #include "nv50_display.h"
 
 static void
@@ -64,7 +66,7 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
 int
 nouveau_framebuffer_init(struct drm_device *dev,
                         struct nouveau_framebuffer *nv_fb,
-                        struct drm_mode_fb_cmd *mode_cmd,
+                        struct drm_mode_fb_cmd2 *mode_cmd,
                         struct nouveau_bo *nvbo)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -107,14 +109,14 @@ nouveau_framebuffer_init(struct drm_device *dev,
 
                if (!tile_flags) {
                        if (dev_priv->card_type < NV_D0)
-                               nv_fb->r_pitch = 0x00100000 | fb->pitch;
+                               nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
                        else
-                               nv_fb->r_pitch = 0x01000000 | fb->pitch;
+                               nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
                } else {
                        u32 mode = nvbo->tile_mode;
                        if (dev_priv->card_type >= NV_C0)
                                mode >>= 4;
-                       nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
+                       nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
                }
        }
 
@@ -124,13 +126,13 @@ nouveau_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 nouveau_user_framebuffer_create(struct drm_device *dev,
                                struct drm_file *file_priv,
-                               struct drm_mode_fb_cmd *mode_cmd)
+                               struct drm_mode_fb_cmd2 *mode_cmd)
 {
        struct nouveau_framebuffer *nouveau_fb;
        struct drm_gem_object *gem;
        int ret;
 
-       gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+       gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
        if (!gem)
                return ERR_PTR(-ENOENT);
 
@@ -147,11 +149,186 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
        return &nouveau_fb->base;
 }
 
-const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
        .fb_create = nouveau_user_framebuffer_create,
        .output_poll_changed = nouveau_fbcon_output_poll_changed,
 };
 
+
+struct drm_prop_enum_list {
+       u8 gen_mask;
+       int type;
+       char *name;
+};
+
+static struct drm_prop_enum_list underscan[] = {
+       { 6, UNDERSCAN_AUTO, "auto" },
+       { 6, UNDERSCAN_OFF, "off" },
+       { 6, UNDERSCAN_ON, "on" },
+       {}
+};
+
+static struct drm_prop_enum_list dither_mode[] = {
+       { 7, DITHERING_MODE_AUTO, "auto" },
+       { 7, DITHERING_MODE_OFF, "off" },
+       { 1, DITHERING_MODE_ON, "on" },
+       { 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
+       { 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
+       { 4, DITHERING_MODE_TEMPORAL, "temporal" },
+       {}
+};
+
+static struct drm_prop_enum_list dither_depth[] = {
+       { 6, DITHERING_DEPTH_AUTO, "auto" },
+       { 6, DITHERING_DEPTH_6BPC, "6 bpc" },
+       { 6, DITHERING_DEPTH_8BPC, "8 bpc" },
+       {}
+};
+
+#define PROP_ENUM(p,gen,n,list) do {                                           \
+       struct drm_prop_enum_list *l = (list);                                 \
+       int c = 0;                                                             \
+       while (l->gen_mask) {                                                  \
+               if (l->gen_mask & (1 << (gen)))                                \
+                       c++;                                                   \
+               l++;                                                           \
+       }                                                                      \
+       if (c) {                                                               \
+               p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c);        \
+               l = (list);                                                    \
+               c = 0;                                                         \
+               while (p && l->gen_mask) {                                     \
+                       if (l->gen_mask & (1 << (gen))) {                      \
+                               drm_property_add_enum(p, c, l->type, l->name); \
+                               c++;                                           \
+                       }                                                      \
+                       l++;                                                   \
+               }                                                              \
+       }                                                                      \
+} while(0)
+
+int
+nouveau_display_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_display_engine *disp = &dev_priv->engine.display;
+       struct drm_connector *connector;
+       int ret;
+
+       ret = disp->init(dev);
+       if (ret)
+               return ret;
+
+       drm_kms_helper_poll_enable(dev);
+
+       /* enable hotplug interrupts */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct nouveau_connector *conn = nouveau_connector(connector);
+               nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, true);
+       }
+
+       return ret;
+}
+
+void
+nouveau_display_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_display_engine *disp = &dev_priv->engine.display;
+       struct drm_connector *connector;
+
+       /* disable hotplug interrupts */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct nouveau_connector *conn = nouveau_connector(connector);
+               nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, false);
+       }
+
+       drm_kms_helper_poll_disable(dev);
+       disp->fini(dev);
+}
+
+int
+nouveau_display_create(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_display_engine *disp = &dev_priv->engine.display;
+       int ret, gen;
+
+       drm_mode_config_init(dev);
+       drm_mode_create_scaling_mode_property(dev);
+       drm_mode_create_dvi_i_properties(dev);
+
+       if (dev_priv->card_type < NV_50)
+               gen = 0;
+       else
+       if (dev_priv->card_type < NV_D0)
+               gen = 1;
+       else
+               gen = 2;
+
+       PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
+       PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
+       PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
+
+       disp->underscan_hborder_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "underscan hborder", 2);
+       disp->underscan_hborder_property->values[0] = 0;
+       disp->underscan_hborder_property->values[1] = 128;
+
+       disp->underscan_vborder_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "underscan vborder", 2);
+       disp->underscan_vborder_property->values[0] = 0;
+       disp->underscan_vborder_property->values[1] = 128;
+
+       dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+       dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
+
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+       if (dev_priv->card_type < NV_10) {
+               dev->mode_config.max_width = 2048;
+               dev->mode_config.max_height = 2048;
+       } else
+       if (dev_priv->card_type < NV_50) {
+               dev->mode_config.max_width = 4096;
+               dev->mode_config.max_height = 4096;
+       } else {
+               dev->mode_config.max_width = 8192;
+               dev->mode_config.max_height = 8192;
+       }
+
+       drm_kms_helper_poll_init(dev);
+       drm_kms_helper_poll_disable(dev);
+
+       ret = disp->create(dev);
+       if (ret)
+               return ret;
+
+       if (dev->mode_config.num_crtc) {
+               ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+void
+nouveau_display_destroy(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_display_engine *disp = &dev_priv->engine.display;
+
+       drm_vblank_cleanup(dev);
+
+       disp->destroy(dev);
+
+       drm_kms_helper_poll_fini(dev);
+       drm_mode_config_cleanup(dev);
+}
+
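+
For reference, a hedged userspace-side sketch (libdrm) of exercising the new "underscan" connector property created in nouveau_display_create() above; the property-lookup loop is generic libdrm usage rather than anything added by this patch:

#include <string.h>
#include <xf86drmMode.h>

/* Illustrative: find a connector property by name and set it. */
static int
example_set_connector_prop(int fd, drmModeConnector *conn,
                           const char *name, uint64_t value)
{
        int i, ret = -1;

        for (i = 0; i < conn->count_props; i++) {
                drmModePropertyPtr prop = drmModeGetProperty(fd, conn->props[i]);
                int match;

                if (!prop)
                        continue;

                match = !strcmp(prop->name, name);
                if (match)
                        ret = drmModeConnectorSetProperty(fd, conn->connector_id,
                                                          prop->prop_id, value);
                drmModeFreeProperty(prop);
                if (match)
                        break;
        }
        return ret;
}

With the values from the matching kernel enum, calling example_set_connector_prop(fd, conn, "underscan", 1) would request UNDERSCAN_ON.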
 int
 nouveau_vblank_enable(struct drm_device *dev, int crtc)
 {
@@ -294,7 +471,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        /* Initialize a page flip struct */
        *s = (struct nouveau_page_flip_state)
                { { }, event, nouveau_crtc(crtc)->index,
-                 fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+                 fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
                  new_bo->bo.offset };
 
        /* Choose the channel the flip will be handled in */
@@ -305,7 +482,10 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        /* Emit a page flip */
        if (dev_priv->card_type >= NV_50) {
-               ret = nv50_display_flip_next(crtc, fb, chan);
+               if (dev_priv->card_type >= NV_D0)
+                       ret = nvd0_display_flip_next(crtc, fb, chan, 0);
+               else
+                       ret = nv50_display_flip_next(crtc, fb, chan);
                if (ret) {
                        nouveau_channel_put(&chan);
                        goto fail_unreserve;
index 00bc6ea..4c2e4e5 100644 (file)
@@ -134,11 +134,13 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
  *  -EBUSY if timeout exceeded
  */
 static inline int
-READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
+READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 {
-       uint32_t val;
+       uint64_t val;
 
        val = nvchan_rd32(chan, chan->user_get);
+       if (chan->user_get_hi)
+               val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;
 
        /* reset counter as long as GET is still advancing, this is
         * to avoid misdetecting a GPU lockup if the GPU happens to
@@ -218,8 +220,8 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
 static int
 nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
 {
-       uint32_t cnt = 0, prev_get = 0;
-       int ret;
+       uint64_t prev_get = 0;
+       int ret, cnt = 0;
 
        ret = nv50_dma_push_wait(chan, slots + 1);
        if (unlikely(ret))
@@ -261,8 +263,8 @@ nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
 int
 nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
 {
-       uint32_t prev_get = 0, cnt = 0;
-       int get;
+       uint64_t prev_get = 0;
+       int cnt = 0, get;
 
        if (chan->dma.ib_max)
                return nv50_dma_wait(chan, slots, size);
index de5efe7..9b93b70 100644 (file)
@@ -29,6 +29,7 @@
 #include "nouveau_connector.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
+#include "nouveau_gpio.h"
 
 /******************************************************************************
  * aux channel util functions
@@ -273,8 +274,6 @@ nouveau_dp_tu_update(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
 u8 *
 nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nvbios *bios = &dev_priv->vbios;
        struct bit_entry d;
        u8 *table;
        int i;
@@ -289,7 +288,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
                return NULL;
        }
 
-       table = ROMPTR(bios, d.data[0]);
+       table = ROMPTR(dev, d.data[0]);
        if (!table) {
                NV_ERROR(dev, "displayport table pointer invalid\n");
                return NULL;
@@ -306,7 +305,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
        }
 
        for (i = 0; i < table[3]; i++) {
-               *entry = ROMPTR(bios, table[table[1] + (i * table[2])]);
+               *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
                if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
                        return table;
        }
@@ -336,7 +335,6 @@ struct dp_state {
 static void
 dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
        int or = dp->or, link = dp->link;
        u8 *entry, sink[2];
        u32 dp_ctrl;
@@ -360,7 +358,7 @@ dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
         * table, that has (among other things) pointers to more scripts that
         * need to be executed, this time depending on link speed.
         */
-       entry = ROMPTR(&dev_priv->vbios, dp->entry[10]);
+       entry = ROMPTR(dev, dp->entry[10]);
        if (entry) {
                if (dp->table[0] < 0x30) {
                        while (dp->link_bw < (ROM16(entry[0]) * 10))
@@ -559,8 +557,6 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
 bool
 nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
 {
-       struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
        struct nouveau_connector *nv_connector =
@@ -581,7 +577,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
 
        dp.dcb = nv_encoder->dcb;
        dp.crtc = nv_crtc->index;
-       dp.auxch = auxch->rd;
+       dp.auxch = auxch->drive;
        dp.or = nv_encoder->or;
        dp.link = !(nv_encoder->dcb->sorconf.link & 1);
        dp.dpcd = nv_encoder->dp.dpcd;
@@ -590,7 +586,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
         * we take during link training (DP_SET_POWER is one), we need
         * to ignore them for the moment to avoid races.
         */
-       pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+       nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false);
 
        /* enable down-spreading, if possible */
        if (dp.table[1] >= 16) {
@@ -639,7 +635,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
        nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc);
 
        /* re-enable hotplug detect */
-       pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+       nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true);
        return true;
 }
 
@@ -656,7 +652,7 @@ nouveau_dp_detect(struct drm_encoder *encoder)
        if (!auxch)
                return false;
 
-       ret = auxch_tx(dev, auxch->rd, 9, DP_DPCD_REV, dpcd, 8);
+       ret = auxch_tx(dev, auxch->drive, 9, DP_DPCD_REV, dpcd, 8);
        if (ret)
                return false;
 
@@ -684,7 +680,7 @@ int
 nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
                 uint8_t *data, int data_nr)
 {
-       return auxch_tx(auxch->dev, auxch->rd, cmd, addr, data, data_nr);
+       return auxch_tx(auxch->dev, auxch->drive, cmd, addr, data, data_nr);
 }
 
 static int
index 9791d13..e4a7cfe 100644 (file)
@@ -124,6 +124,10 @@ MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
 int nouveau_ctxfw;
 module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
 
+MODULE_PARM_DESC(ctxfw, "Santise DCB table according to MXM-SIS\n");
+int nouveau_mxmdcb = 1;
+module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
+
 int nouveau_fbpercrtc;
 #if 0
 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -178,8 +182,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       NV_INFO(dev, "Disabling fbcon acceleration...\n");
-       nouveau_fbcon_save_disable_accel(dev);
+       NV_INFO(dev, "Disabling display...\n");
+       nouveau_display_fini(dev);
+
+       NV_INFO(dev, "Disabling fbcon...\n");
+       nouveau_fbcon_set_suspend(dev, 1);
 
        NV_INFO(dev, "Unpinning framebuffer(s)...\n");
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -220,7 +227,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 
                ret = dev_priv->eng[e]->fini(dev, e, true);
                if (ret) {
-                       NV_ERROR(dev, "... engine %d failed: %d\n", i, ret);
+                       NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
                        goto out_abort;
                }
        }
@@ -246,10 +253,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
                pci_set_power_state(pdev, PCI_D3hot);
        }
 
-       console_lock();
-       nouveau_fbcon_set_suspend(dev, 1);
-       console_unlock();
-       nouveau_fbcon_restore_accel(dev);
        return 0;
 
 out_abort:
@@ -275,8 +278,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       nouveau_fbcon_save_disable_accel(dev);
-
        NV_INFO(dev, "We're back, enabling device...\n");
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
@@ -296,8 +297,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
        if (ret)
                return ret;
 
-       nouveau_pm_resume(dev);
-
        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                ret = nouveau_mem_init_agp(dev);
                if (ret) {
@@ -337,6 +336,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
                }
        }
 
+       nouveau_pm_resume(dev);
+
        NV_INFO(dev, "Restoring mode...\n");
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_framebuffer *nouveau_fb;
@@ -358,16 +359,10 @@ nouveau_pci_resume(struct pci_dev *pdev)
                        NV_ERROR(dev, "Could not pin/map cursor.\n");
        }
 
-       engine->display.init(dev);
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-               u32 offset = nv_crtc->cursor.nvbo->bo.offset;
+       nouveau_fbcon_set_suspend(dev, 0);
+       nouveau_fbcon_zfill_all(dev);
 
-               nv_crtc->cursor.set_offset(nv_crtc, offset);
-               nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
-                                                nv_crtc->cursor_saved_y);
-       }
+       nouveau_display_init(dev);
 
        /* Force CLUT to get re-loaded during modeset */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -376,18 +371,35 @@ nouveau_pci_resume(struct pci_dev *pdev)
                nv_crtc->lut.depth = 0;
        }
 
-       console_lock();
-       nouveau_fbcon_set_suspend(dev, 0);
-       console_unlock();
+       drm_helper_resume_force_mode(dev);
 
-       nouveau_fbcon_zfill_all(dev);
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+               u32 offset = nv_crtc->cursor.nvbo->bo.offset;
 
-       drm_helper_resume_force_mode(dev);
+               nv_crtc->cursor.set_offset(nv_crtc, offset);
+               nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+                                                nv_crtc->cursor_saved_y);
+       }
 
-       nouveau_fbcon_restore_accel(dev);
        return 0;
 }
 
+static const struct file_operations nouveau_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = nouveau_ttm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+#if defined(CONFIG_COMPAT)
+       .compat_ioctl = nouveau_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features =
                DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
@@ -413,21 +425,7 @@ static struct drm_driver driver = {
        .disable_vblank = nouveau_vblank_disable,
        .reclaim_buffers = drm_core_reclaim_buffers,
        .ioctls = nouveau_ioctls,
-       .fops = {
-               .owner = THIS_MODULE,
-               .open = drm_open,
-               .release = drm_release,
-               .unlocked_ioctl = drm_ioctl,
-               .mmap = nouveau_ttm_mmap,
-               .poll = drm_poll,
-               .fasync = drm_fasync,
-               .read = drm_read,
-#if defined(CONFIG_COMPAT)
-               .compat_ioctl = nouveau_compat_ioctl,
-#endif
-               .llseek = noop_llseek,
-       },
-
+       .fops = &nouveau_driver_fops,
        .gem_init_object = nouveau_gem_object_new,
        .gem_free_object = nouveau_gem_object_del,
        .gem_open_object = nouveau_gem_object_open,
index 4c0be3a..38134a9 100644 (file)
@@ -163,6 +163,9 @@ enum nouveau_flags {
 #define NVOBJ_ENGINE_COPY0     3
 #define NVOBJ_ENGINE_COPY1     4
 #define NVOBJ_ENGINE_MPEG      5
+#define NVOBJ_ENGINE_PPP       NVOBJ_ENGINE_MPEG
+#define NVOBJ_ENGINE_BSP       6
+#define NVOBJ_ENGINE_VP                7
 #define NVOBJ_ENGINE_DISPLAY   15
 #define NVOBJ_ENGINE_NR                16
 
@@ -229,6 +232,7 @@ struct nouveau_channel {
        /* mapping of the regs controlling the fifo */
        void __iomem *user;
        uint32_t user_get;
+       uint32_t user_get_hi;
        uint32_t user_put;
 
        /* Fencing */
@@ -246,7 +250,7 @@ struct nouveau_channel {
        struct nouveau_gpuobj *pushbuf;
        struct nouveau_bo     *pushbuf_bo;
        struct nouveau_vma     pushbuf_vma;
-       uint32_t               pushbuf_base;
+       uint64_t               pushbuf_base;
 
        /* Notifier memory */
        struct nouveau_bo *notifier_bo;
@@ -393,24 +397,25 @@ struct nouveau_display_engine {
        int (*early_init)(struct drm_device *);
        void (*late_takedown)(struct drm_device *);
        int (*create)(struct drm_device *);
-       int (*init)(struct drm_device *);
        void (*destroy)(struct drm_device *);
+       int (*init)(struct drm_device *);
+       void (*fini)(struct drm_device *);
+
+       struct drm_property *dithering_mode;
+       struct drm_property *dithering_depth;
+       struct drm_property *underscan_property;
+       struct drm_property *underscan_hborder_property;
+       struct drm_property *underscan_vborder_property;
 };
 
 struct nouveau_gpio_engine {
-       void *priv;
-
-       int  (*init)(struct drm_device *);
-       void (*takedown)(struct drm_device *);
-
-       int  (*get)(struct drm_device *, enum dcb_gpio_tag);
-       int  (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
-
-       int  (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
-                            void (*)(void *, int), void *);
-       void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
-                              void (*)(void *, int), void *);
-       bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+       spinlock_t lock;
+       struct list_head isr;
+       int (*init)(struct drm_device *);
+       void (*fini)(struct drm_device *);
+       int (*drive)(struct drm_device *, int line, int dir, int out);
+       int (*sense)(struct drm_device *, int line);
+       void (*irq_enable)(struct drm_device *, int line, bool);
 };
 
 struct nouveau_pm_voltage_level {
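
struct nouveau_gpio_engine loses its tag-based get/set and IRQ-callback hooks; each chipset now only provides init/fini, drive, sense and irq_enable, and the DCB tag lookup moves into the common nouveau_gpio.c added later in this diff. A hedged sketch of how a chipset's hooks would be wired into the slimmer vtable at engine-setup time (the wrapper function is illustrative; the nv50_* hooks themselves are declared further down in this diff):

/* Sketch only: populate the reworked GPIO engine with NV50-class hooks. */
static void example_setup_nv50_gpio(struct drm_nouveau_private *dev_priv)
{
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;

	pgpio->init       = nv50_gpio_init;
	pgpio->fini       = nv50_gpio_fini;
	pgpio->drive      = nv50_gpio_drive;
	pgpio->sense      = nv50_gpio_sense;
	pgpio->irq_enable = nv50_gpio_irq_enable;
}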
@@ -484,7 +489,7 @@ struct nouveau_pm_level {
        u32 copy;
        u32 daemon;
        u32 vdec;
-       u32 unk05;      /* nv50:nva3, roughly.. */
+       u32 dom6;
        u32 unka0;      /* nva3:nvc0 */
        u32 hub01;      /* nvc0- */
        u32 hub06;      /* nvc0- */
@@ -518,6 +523,12 @@ struct nouveau_pm_memtimings {
        int nr_timing;
 };
 
+struct nouveau_pm_fan {
+       u32 min_duty;
+       u32 max_duty;
+       u32 pwm_freq;
+};
+
 struct nouveau_pm_engine {
        struct nouveau_pm_voltage voltage;
        struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
@@ -525,6 +536,8 @@ struct nouveau_pm_engine {
        struct nouveau_pm_memtimings memtimings;
        struct nouveau_pm_temp_sensor_constants sensor_constants;
        struct nouveau_pm_threshold_temp threshold_temp;
+       struct nouveau_pm_fan fan;
+       u32 pwm_divisor;
 
        struct nouveau_pm_level boot;
        struct nouveau_pm_level *cur;
@@ -532,19 +545,14 @@ struct nouveau_pm_engine {
        struct device *hwmon;
        struct notifier_block acpi_nb;
 
-       int (*clock_get)(struct drm_device *, u32 id);
-       void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
-                          u32 id, int khz);
-       void (*clock_set)(struct drm_device *, void *);
-
        int  (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
        void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
-       void (*clocks_set)(struct drm_device *, void *);
+       int (*clocks_set)(struct drm_device *, void *);
 
        int (*voltage_get)(struct drm_device *);
        int (*voltage_set)(struct drm_device *, int voltage);
-       int (*fanspeed_get)(struct drm_device *);
-       int (*fanspeed_set)(struct drm_device *, int fanspeed);
+       int (*pwm_get)(struct drm_device *, int line, u32*, u32*);
+       int (*pwm_set)(struct drm_device *, int line, u32, u32);
        int (*temp_get)(struct drm_device *);
 };
 
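
The PM engine hunk above swaps the fanspeed_get/set hooks for raw per-line pwm_get/pwm_set accessors and gains a nouveau_pm_fan description (min_duty, max_duty, pwm_freq) plus a pwm_divisor. A hedged sketch of how a fan percentage might be scaled into that duty range and handed to pwm_set(); treating pwm_set's two u32 arguments as divider and duty is an assumption, as are the helper name and the GPIO line argument:

/* Illustrative only: map 0-100% onto the duty range described by the new
 * nouveau_pm_fan fields and program it through the pwm_set() hook.
 */
static int example_set_fan_percent(struct drm_device *dev, int line, int percent)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	u32 duty;

	if (!pm->pwm_set)
		return -ENODEV;

	duty = pm->fan.min_duty +
	       ((pm->fan.max_duty - pm->fan.min_duty) * percent) / 100;

	/* assumed argument order: (dev, gpio line, divider, duty) */
	return pm->pwm_set(dev, line, pm->pwm_divisor, duty);
}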
@@ -780,6 +788,8 @@ struct drm_nouveau_private {
        struct nouveau_vm *chan_vm;
 
        struct nvbios vbios;
+       u8 *mxms;
+       struct list_head i2c_ports;
 
        struct nv04_mode_state mode_reg;
        struct nv04_mode_state saved_reg;
@@ -850,6 +860,7 @@ extern char *nouveau_perflvl;
 extern int nouveau_perflvl_wr;
 extern int nouveau_msi;
 extern int nouveau_ctxfw;
+extern int nouveau_mxmdcb;
 
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -1000,7 +1011,10 @@ extern int nouveau_sgdma_init(struct drm_device *);
 extern void nouveau_sgdma_takedown(struct drm_device *);
 extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
                                           uint32_t offset);
-extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+                                              unsigned long size,
+                                              uint32_t page_flags,
+                                              struct page *dummy_read_page);
 
 /* nouveau_debugfs.c */
 #if defined(CONFIG_DRM_NOUVEAU_DEBUG)
@@ -1072,8 +1086,6 @@ extern int nouveau_run_vbios_init(struct drm_device *);
 extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
                                        struct dcb_entry *, int crtc);
 extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
-extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
-                                                     enum dcb_gpio_tag);
 extern struct dcb_connector_table_entry *
 nouveau_bios_connector_entry(struct drm_device *, int index);
 extern u32 get_pll_register(struct drm_device *, enum pll_types);
@@ -1091,11 +1103,18 @@ extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
                            enum LVDS_script, int pxclk);
 bool bios_encoder_match(struct dcb_entry *, u32 hash);
 
+/* nouveau_mxm.c */
+int  nouveau_mxm_init(struct drm_device *dev);
+void nouveau_mxm_fini(struct drm_device *dev);
+
 /* nouveau_ttm.c */
 int nouveau_ttm_global_init(struct drm_nouveau_private *);
 void nouveau_ttm_global_release(struct drm_nouveau_private *);
 int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
 
+/* nouveau_hdmi.c */
+void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+
 /* nouveau_dp.c */
 int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
                     uint8_t *data, int data_nr);
@@ -1222,6 +1241,9 @@ extern int  nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
 /* nv84_crypt.c */
 extern int  nv84_crypt_create(struct drm_device *);
 
+/* nv98_crypt.c */
+extern int  nv98_crypt_create(struct drm_device *dev);
+
 /* nva3_copy.c */
 extern int  nva3_copy_create(struct drm_device *dev);
 
@@ -1234,6 +1256,17 @@ extern int  nv31_mpeg_create(struct drm_device *dev);
 /* nv50_mpeg.c */
 extern int  nv50_mpeg_create(struct drm_device *dev);
 
+/* nv84_bsp.c */
+/* nv98_bsp.c */
+extern int  nv84_bsp_create(struct drm_device *dev);
+
+/* nv84_vp.c */
+/* nv98_vp.c */
+extern int  nv84_vp_create(struct drm_device *dev);
+
+/* nv98_ppp.c */
+extern int  nv98_ppp_create(struct drm_device *dev);
+
 /* nv04_instmem.c */
 extern int  nv04_instmem_init(struct drm_device *);
 extern void nv04_instmem_takedown(struct drm_device *);
@@ -1311,13 +1344,19 @@ extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
 extern int nv04_display_early_init(struct drm_device *);
 extern void nv04_display_late_takedown(struct drm_device *);
 extern int nv04_display_create(struct drm_device *);
-extern int nv04_display_init(struct drm_device *);
 extern void nv04_display_destroy(struct drm_device *);
+extern int nv04_display_init(struct drm_device *);
+extern void nv04_display_fini(struct drm_device *);
 
 /* nvd0_display.c */
 extern int nvd0_display_create(struct drm_device *);
-extern int nvd0_display_init(struct drm_device *);
 extern void nvd0_display_destroy(struct drm_device *);
+extern int nvd0_display_init(struct drm_device *);
+extern void nvd0_display_fini(struct drm_device *);
+struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int crtc);
+void nvd0_display_flip_stop(struct drm_crtc *);
+int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+                          struct nouveau_channel *, u32 swap_interval);
 
 /* nv04_crtc.c */
 extern int nv04_crtc_create(struct drm_device *, int index);
@@ -1412,6 +1451,10 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
                                  struct drm_file *);
 
 /* nouveau_display.c */
+int nouveau_display_create(struct drm_device *dev);
+void nouveau_display_destroy(struct drm_device *dev);
+int nouveau_display_init(struct drm_device *dev);
+void nouveau_display_fini(struct drm_device *dev);
 int nouveau_vblank_enable(struct drm_device *dev, int crtc);
 void nouveau_vblank_disable(struct drm_device *dev, int crtc);
 int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -1426,23 +1469,22 @@ int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
                                 uint32_t handle);
 
 /* nv10_gpio.c */
-int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+int nv10_gpio_init(struct drm_device *dev);
+void nv10_gpio_fini(struct drm_device *dev);
+int nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nv10_gpio_sense(struct drm_device *dev, int line);
+void nv10_gpio_irq_enable(struct drm_device *, int line, bool on);
 
 /* nv50_gpio.c */
 int nv50_gpio_init(struct drm_device *dev);
 void nv50_gpio_fini(struct drm_device *dev);
-int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-int nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-int  nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
-                           void (*)(void *, int), void *);
-void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
-                             void (*)(void *, int), void *);
-bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
-
-/* nv50_calc. */
+int nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nv50_gpio_sense(struct drm_device *dev, int line);
+void nv50_gpio_irq_enable(struct drm_device *, int line, bool on);
+int nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nvd0_gpio_sense(struct drm_device *dev, int line);
+
+/* nv50_calc.c */
 int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
                  int *N1, int *M1, int *N2, int *M2, int *P);
 int nva3_calc_pll(struct drm_device *, struct pll_lims *,
@@ -1565,6 +1607,13 @@ extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
 #define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
 #define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
 #define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
+#define NV_WARNONCE(d, fmt, arg...) do {                                       \
+       static int _warned = 0;                                                \
+       if (!_warned) {                                                        \
+               NV_WARN(d, fmt, ##arg);                                        \
+               _warned = 1;                                                   \
+       }                                                                      \
+} while(0)
 
 /* nouveau_reg_debug bitmask */
 enum {
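
NV_WARNONCE wraps NV_WARN in a per-call-site static flag so a message is printed at most once, which keeps warnings in hot paths from flooding the log. A minimal usage sketch (the condition and message are illustrative):

/* Illustrative only: complain once per call site, stay quiet afterwards. */
if (status & 0x80000000)
	NV_WARNONCE(dev, "unexpected status bits: 0x%08x\n", status);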
index 95c843e..f3fb649 100644
@@ -42,8 +42,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
        return container_of(fb, struct nouveau_framebuffer, base);
 }
 
-extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
-
 int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
-                            struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
+                            struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo);
 #endif /* __NOUVEAU_FB_H__ */
index 3a4cc32..9892218 100644
@@ -36,6 +36,7 @@
 #include <linux/init.h>
 #include <linux/screen_info.h>
 #include <linux/vga_switcheroo.h>
+#include <linux/console.h>
 
 #include "drmP.h"
 #include "drm.h"
@@ -281,7 +282,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
        struct nouveau_framebuffer *nouveau_fb;
        struct nouveau_channel *chan;
        struct nouveau_bo *nvbo;
-       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd;
        struct pci_dev *pdev = dev->pdev;
        struct device *device = &pdev->dev;
        int size, ret;
@@ -289,12 +290,13 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
 
-       mode_cmd.bpp = sizes->surface_bpp;
-       mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
-       mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
-       mode_cmd.depth = sizes->surface_depth;
+       mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
+       mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
 
-       size = mode_cmd.pitch * mode_cmd.height;
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                         sizes->surface_depth);
+
+       size = mode_cmd.pitches[0] * mode_cmd.height;
        size = roundup(size, PAGE_SIZE);
 
        ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
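
With drm_mode_fb_cmd2 the fbcon path carries an explicit pixel_format and per-plane pitches[] instead of bpp/depth/pitch. A worked example of the sizing math above for a hypothetical 1280x1024 console at 32bpp, depth 24 (numbers are illustrative):

/* Illustrative numbers for a 1280x1024, 32bpp fbcon surface:
 *   pitches[0] = 1280 * (32 >> 3) = 5120 bytes, already a multiple of 256
 *   size       = 5120 * 1024     = 5242880 bytes
 *   roundup(size, PAGE_SIZE)     = 5242880 (already aligned for 4KiB pages)
 * pixel_format = drm_mode_legacy_fb_format(32, 24), i.e. XRGB8888.
 */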
@@ -369,7 +371,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
        info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
        info->screen_size = size;
 
-       drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
 
        /* Set aperture base/size for vesafb takeover */
@@ -547,7 +549,13 @@ void nouveau_fbcon_restore_accel(struct drm_device *dev)
 void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       console_lock();
+       if (state == 0)
+               nouveau_fbcon_save_disable_accel(dev);
        fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+       if (state == 1)
+               nouveau_fbcon_restore_accel(dev);
+       console_unlock();
 }
 
 void nouveau_fbcon_zfill_all(struct drm_device *dev)
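
nouveau_fbcon_set_suspend() now takes console_lock itself and folds in the accel flag handling, so callers just pass the fbdev suspend state (1 to suspend, 0 to resume, matching fb_set_suspend()) without any locking of their own. A hedged sketch of a caller; the wrapper name is illustrative and the real call sites after this rework are not all visible in this section:

/* Illustrative only: no console_lock()/console_unlock() at the call site. */
static void example_fbcon_set_suspend(struct drm_device *dev, bool suspend)
{
	nouveau_fbcon_set_suspend(dev, suspend ? 1 : 0);
}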
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
new file mode 100644
index 0000000..a580cc6
--- /dev/null
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_i2c.h"
+#include "nouveau_gpio.h"
+
+static u8 *
+dcb_gpio_table(struct drm_device *dev)
+{
+       u8 *dcb = dcb_table(dev);
+       if (dcb) {
+               if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
+                       return ROMPTR(dev, dcb[0x0a]);
+               if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
+                       return ROMPTR(dev, dcb[-15]);
+       }
+       return NULL;
+}
+
+static u8 *
+dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
+{
+       u8 *table = dcb_gpio_table(dev);
+       if (table) {
+               *version = table[0];
+               if (*version < 0x30 && ent < table[2])
+                       return table + 3 + (ent * table[1]);
+               else if (ent < table[2])
+                       return table + table[1] + (ent * table[3]);
+       }
+       return NULL;
+}
+
+int
+nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+       return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
+}
+
+int
+nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+       return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
+}
+
+int
+nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
+                 struct gpio_func *gpio)
+{
+       u8 *table, *entry, version;
+       int i = -1;
+
+       if (line == 0xff && func == 0xff)
+               return -EINVAL;
+
+       while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
+               if (version < 0x40) {
+                       u16 data = ROM16(entry[0]);
+                       *gpio = (struct gpio_func) {
+                               .line = (data & 0x001f) >> 0,
+                               .func = (data & 0x07e0) >> 5,
+                               .log[0] = (data & 0x1800) >> 11,
+                               .log[1] = (data & 0x6000) >> 13,
+                       };
+               } else
+               if (version < 0x41) {
+                       *gpio = (struct gpio_func) {
+                               .line = entry[0] & 0x1f,
+                               .func = entry[1],
+                               .log[0] = (entry[3] & 0x18) >> 3,
+                               .log[1] = (entry[3] & 0x60) >> 5,
+                       };
+               } else {
+                       *gpio = (struct gpio_func) {
+                               .line = entry[0] & 0x3f,
+                               .func = entry[1],
+                               .log[0] = (entry[4] & 0x30) >> 4,
+                               .log[1] = (entry[4] & 0xc0) >> 6,
+                       };
+               }
+
+               if ((line == 0xff || line == gpio->line) &&
+                   (func == 0xff || func == gpio->func))
+                       return 0;
+       }
+
+       /* DCB 2.2, fixed TVDAC GPIO data */
+       if ((table = dcb_table(dev)) && table[0] >= 0x22) {
+               if (func == DCB_GPIO_TVDAC0) {
+                       *gpio = (struct gpio_func) {
+                               .func = DCB_GPIO_TVDAC0,
+                               .line = table[-4] >> 4,
+                               .log[0] = !!(table[-5] & 2),
+                               .log[1] =  !(table[-5] & 2),
+                       };
+                       return 0;
+               }
+       }
+
+       /* Apple iMac G4 NV18 */
+       if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+               if (func == DCB_GPIO_TVDAC0) {
+                       *gpio = (struct gpio_func) {
+                               .func = DCB_GPIO_TVDAC0,
+                               .line = 4,
+                               .log[0] = 0,
+                               .log[1] = 1,
+                       };
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+
+int
+nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
+{
+       struct gpio_func gpio;
+       int ret;
+
+       ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+       if (ret == 0) {
+               int dir = !!(gpio.log[state] & 0x02);
+               int out = !!(gpio.log[state] & 0x01);
+               ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
+       }
+
+       return ret;
+}
+
+int
+nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
+{
+       struct gpio_func gpio;
+       int ret;
+
+       ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+       if (ret == 0) {
+               ret = nouveau_gpio_sense(dev, idx, gpio.line);
+               if (ret >= 0)
+                       ret = (ret == (gpio.log[1] & 1));
+       }
+
+       return ret;
+}
+
+int
+nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       struct gpio_func gpio;
+       int ret;
+
+       ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+       if (ret == 0) {
+               if (idx == 0 && pgpio->irq_enable)
+                       pgpio->irq_enable(dev, gpio.line, on);
+               else
+                       ret = -ENODEV;
+       }
+
+       return ret;
+}
+
+struct gpio_isr {
+       struct drm_device *dev;
+       struct list_head head;
+       struct work_struct work;
+       int idx;
+       struct gpio_func func;
+       void (*handler)(void *, int);
+       void *data;
+       bool inhibit;
+};
+
+static void
+nouveau_gpio_isr_bh(struct work_struct *work)
+{
+       struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
+       struct drm_device *dev = isr->dev;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       unsigned long flags;
+       int state;
+
+       state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
+       if (state >= 0)
+               isr->handler(isr->data, state);
+
+       spin_lock_irqsave(&pgpio->lock, flags);
+       isr->inhibit = false;
+       spin_unlock_irqrestore(&pgpio->lock, flags);
+}
+
+void
+nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       struct gpio_isr *isr;
+
+       if (idx != 0)
+               return;
+
+       spin_lock(&pgpio->lock);
+       list_for_each_entry(isr, &pgpio->isr, head) {
+               if (line_mask & (1 << isr->func.line)) {
+                       if (isr->inhibit)
+                               continue;
+                       isr->inhibit = true;
+                       schedule_work(&isr->work);
+               }
+       }
+       spin_unlock(&pgpio->lock);
+}
+
+int
+nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
+                    void (*handler)(void *, int), void *data)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       struct gpio_isr *isr;
+       unsigned long flags;
+       int ret;
+
+       isr = kzalloc(sizeof(*isr), GFP_KERNEL);
+       if (!isr)
+               return -ENOMEM;
+
+       ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
+       if (ret) {
+               kfree(isr);
+               return ret;
+       }
+
+       INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
+       isr->dev = dev;
+       isr->handler = handler;
+       isr->data = data;
+       isr->idx = idx;
+
+       spin_lock_irqsave(&pgpio->lock, flags);
+       list_add(&isr->head, &pgpio->isr);
+       spin_unlock_irqrestore(&pgpio->lock, flags);
+       return 0;
+}
+
+void
+nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
+                    void (*handler)(void *, int), void *data)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       struct gpio_isr *isr, *tmp;
+       struct gpio_func func;
+       unsigned long flags;
+       LIST_HEAD(tofree);
+       int ret;
+
+       ret = nouveau_gpio_find(dev, idx, tag, line, &func);
+       if (ret == 0) {
+               spin_lock_irqsave(&pgpio->lock, flags);
+               list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
+                       if (memcmp(&isr->func, &func, sizeof(func)) ||
+                           isr->idx != idx ||
+                           isr->handler != handler || isr->data != data)
+                               continue;
+                       list_move(&isr->head, &tofree);
+               }
+               spin_unlock_irqrestore(&pgpio->lock, flags);
+
+               list_for_each_entry_safe(isr, tmp, &tofree, head) {
+                       flush_work_sync(&isr->work);
+                       kfree(isr);
+               }
+       }
+}
+
+int
+nouveau_gpio_create(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+       INIT_LIST_HEAD(&pgpio->isr);
+       spin_lock_init(&pgpio->lock);
+
+       return nouveau_gpio_init(dev);
+}
+
+void
+nouveau_gpio_destroy(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+       nouveau_gpio_fini(dev);
+       BUG_ON(!list_empty(&pgpio->isr));
+}
+
+int
+nouveau_gpio_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       int ret = 0;
+
+       if (pgpio->init)
+               ret = pgpio->init(dev);
+
+       return ret;
+}
+
+void
+nouveau_gpio_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+       if (pgpio->fini)
+               pgpio->fini(dev);
+}
+
+void
+nouveau_gpio_reset(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u8 *entry, version;
+       int ent = -1;
+
+       while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
+               u8 func = 0xff, line, defs, unk0, unk1;
+               if (version >= 0x41) {
+                       defs = !!(entry[0] & 0x80);
+                       line = entry[0] & 0x3f;
+                       func = entry[1];
+                       unk0 = entry[2];
+                       unk1 = entry[3] & 0x1f;
+               } else
+               if (version >= 0x40) {
+                       line = entry[0] & 0x1f;
+                       func = entry[1];
+                       defs = !!(entry[3] & 0x01);
+                       unk0 = !!(entry[3] & 0x02);
+                       unk1 = !!(entry[3] & 0x04);
+               } else {
+                       break;
+               }
+
+               if (func == 0xff)
+                       continue;
+
+               nouveau_gpio_func_set(dev, func, defs);
+
+               if (dev_priv->card_type >= NV_D0) {
+                       nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
+                       if (unk1--)
+                               nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
+               } else
+               if (dev_priv->card_type >= NV_50) {
+                       static const u32 regs[] = { 0xe100, 0xe28c };
+                       u32 val = (unk1 << 16) | unk0;
+                       u32 reg = regs[line >> 4]; line &= 0x0f;
+
+                       nv_mask(dev, reg, 0x00010001 << line, val << line);
+               }
+       }
+}
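
The new nouveau_gpio.c centralises GPIO handling: DCB table parsing, tag-based get/set layered on the chipset drive/sense hooks, and an ISR list whose handlers nouveau_gpio_isr() bounces to a workqueue, with a per-entry inhibit flag so a handler is not re-queued while it is still pending. A hedged usage sketch of registering and removing a handler; the handler body, wrapper names and the choice of tag are illustrative (idx 0 is the only set serviced by nouveau_gpio_isr() above; line 0xff lets nouveau_gpio_find() match by tag alone):

/* Illustrative only: track level changes on a DCB GPIO function. */
static void example_gpio_changed(void *data, int state)
{
	struct drm_device *dev = data;
	NV_INFO(dev, "gpio state is now %d\n", state);
}

static int example_track_gpio(struct drm_device *dev, u8 tag)
{
	return nouveau_gpio_isr_add(dev, 0, tag, 0xff,
				    example_gpio_changed, dev);
}

static void example_untrack_gpio(struct drm_device *dev, u8 tag)
{
	nouveau_gpio_isr_del(dev, 0, tag, 0xff,
			     example_gpio_changed, dev);
}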
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.h b/drivers/gpu/drm/nouveau/nouveau_gpio.h
new file mode 100644
index 0000000..64c5cb0
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_GPIO_H__
+#define __NOUVEAU_GPIO_H__
+
+struct gpio_func {
+       u8 func;
+       u8 line;
+       u8 log[2];
+};
+
+/* nouveau_gpio.c */
+int  nouveau_gpio_create(struct drm_device *);
+void nouveau_gpio_destroy(struct drm_device *);
+int  nouveau_gpio_init(struct drm_device *);
+void nouveau_gpio_fini(struct drm_device *);
+void nouveau_gpio_reset(struct drm_device *);
+int  nouveau_gpio_drive(struct drm_device *, int idx, int line,
+                       int dir, int out);
+int  nouveau_gpio_sense(struct drm_device *, int idx, int line);
+int  nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
+                      struct gpio_func *);
+int  nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
+int  nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
+int  nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
+void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
+int  nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
+                         void (*)(void *, int state), void *data);
+void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
+                         void (*)(void *, int state), void *data);
+
+static inline bool
+nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
+{
+       struct gpio_func func;
+       return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
+}
+
+static inline int
+nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
+{
+       return nouveau_gpio_set(dev, 0, tag, 0xff, state);
+}
+
+static inline int
+nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
+{
+       return nouveau_gpio_get(dev, 0, tag, 0xff);
+}
+
+#endif
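
The inline helpers at the end of the header cover the common case of addressing a GPIO purely by its DCB function tag, passing line 0xff so nouveau_gpio_find() matches on the tag alone. A hedged usage sketch; DCB_GPIO_TVDAC0 is borrowed from nouveau_gpio.c above and any other function tag works the same way:

/* Illustrative only: drive a function-tagged GPIO and read it back. */
if (nouveau_gpio_func_valid(dev, DCB_GPIO_TVDAC0)) {
	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, 1);
	if (nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0) == 1)
		NV_INFO(dev, "TVDAC0 gpio is asserted\n");
}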
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
new file mode 100644
index 0000000..59ea1c1
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_connector.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+
+static bool
+hdmi_sor(struct drm_encoder *encoder)
+{
+       struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+       if (dev_priv->chipset < 0xa3)
+               return false;
+       return true;
+}
+
+static inline u32
+hdmi_base(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+       if (!hdmi_sor(encoder))
+               return 0x616500 + (nv_crtc->index * 0x800);
+       return 0x61c500 + (nv_encoder->or * 0x800);
+}
+
+static void
+hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
+{
+       nv_wr32(encoder->dev, hdmi_base(encoder) + reg, val);
+}
+
+static u32
+hdmi_rd32(struct drm_encoder *encoder, u32 reg)
+{
+       return nv_rd32(encoder->dev, hdmi_base(encoder) + reg);
+}
+
+static u32
+hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
+{
+       u32 tmp = hdmi_rd32(encoder, reg);
+       hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
+       return tmp;
+}
+
+static void
+nouveau_audio_disconnect(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct drm_device *dev = encoder->dev;
+       u32 or = nv_encoder->or * 0x800;
+
+       if (hdmi_sor(encoder)) {
+               nv_mask(dev, 0x61c448 + or, 0x00000003, 0x00000000);
+       }
+}
+
+static void
+nouveau_audio_mode_set(struct drm_encoder *encoder,
+                      struct drm_display_mode *mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_connector *nv_connector;
+       struct drm_device *dev = encoder->dev;
+       u32 or = nv_encoder->or * 0x800;
+       int i;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (!drm_detect_monitor_audio(nv_connector->edid)) {
+               nouveau_audio_disconnect(encoder);
+               return;
+       }
+
+       if (hdmi_sor(encoder)) {
+               nv_mask(dev, 0x61c448 + or, 0x00000001, 0x00000001);
+
+               drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+               if (nv_connector->base.eld[0]) {
+                       u8 *eld = nv_connector->base.eld;
+                       for (i = 0; i < eld[2] * 4; i++)
+                               nv_wr32(dev, 0x61c440 + or, (i << 8) | eld[i]);
+                       for (i = eld[2] * 4; i < 0x60; i++)
+                               nv_wr32(dev, 0x61c440 + or, (i << 8) | 0x00);
+                       nv_mask(dev, 0x61c448 + or, 0x00000002, 0x00000002);
+               }
+       }
+}
+
+static void
+nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
+{
+       /* calculate checksum for the infoframe */
+       u8 sum = 0, i;
+       for (i = 0; i < frame[2]; i++)
+               sum += frame[i];
+       frame[3] = 256 - sum;
+
+       /* disable infoframe, and write header */
+       hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
+       hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
+
+       /* register scans tell me the audio infoframe has only one set of
+        * subpack regs, according to tegra (gee nvidia, it'd be nice if we
+        * could get those docs too!), the hdmi block pads out the rest of
+        * the packet on its own.
+        */
+       if (ctrl == 0x020)
+               frame[2] = 6;
+
+       /* write out checksum and data, weird weird 7 byte register pairs */
+       for (i = 0; i < frame[2] + 1; i += 7) {
+               u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
+               u32 *subpack = (u32 *)&frame[3 + i];
+               hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
+               hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
+       }
+
+       /* enable the infoframe */
+       hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
+}
+
+static void
+nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
+                            struct drm_display_mode *mode)
+{
+       const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
+       const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
+       const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
+       u8 frame[20];
+
+       frame[0x00] = 0x82; /* AVI infoframe */
+       frame[0x01] = 0x02; /* version */
+       frame[0x02] = 0x0d; /* length */
+       frame[0x03] = 0x00;
+       frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
+       frame[0x05] = (C << 6) | (M << 4) | R;
+       frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
+       frame[0x07] = VIC;
+       frame[0x08] = PR;
+       frame[0x09] = bar_top & 0xff;
+       frame[0x0a] = bar_top >> 8;
+       frame[0x0b] = bar_bottom & 0xff;
+       frame[0x0c] = bar_bottom >> 8;
+       frame[0x0d] = bar_left & 0xff;
+       frame[0x0e] = bar_left >> 8;
+       frame[0x0f] = bar_right & 0xff;
+       frame[0x10] = bar_right >> 8;
+       frame[0x11] = 0x00;
+       frame[0x12] = 0x00;
+       frame[0x13] = 0x00;
+
+       nouveau_hdmi_infoframe(encoder, 0x020, frame);
+}
+
+static void
+nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
+                            struct drm_display_mode *mode)
+{
+       const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
+       const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
+       u8 frame[12];
+
+       frame[0x00] = 0x84;     /* Audio infoframe */
+       frame[0x01] = 0x01;     /* version */
+       frame[0x02] = 0x0a;     /* length */
+       frame[0x03] = 0x00;
+       frame[0x04] = (CT << 4) | CC;
+       frame[0x05] = (SF << 2) | ceaSS;
+       frame[0x06] = FMT;
+       frame[0x07] = CA;
+       frame[0x08] = (DM_INH << 7) | (LSV << 3);
+       frame[0x09] = 0x00;
+       frame[0x0a] = 0x00;
+       frame[0x0b] = 0x00;
+
+       nouveau_hdmi_infoframe(encoder, 0x000, frame);
+}
+
+static void
+nouveau_hdmi_disconnect(struct drm_encoder *encoder)
+{
+       nouveau_audio_disconnect(encoder);
+
+       /* disable audio and avi infoframes */
+       hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
+       hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
+
+       /* disable hdmi */
+       hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
+}
+
+void
+nouveau_hdmi_mode_set(struct drm_encoder *encoder,
+                     struct drm_display_mode *mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_connector *nv_connector;
+       struct drm_device *dev = encoder->dev;
+       u32 max_ac_packet, rekey;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (!mode || !nv_connector || !nv_connector->edid ||
+           !drm_detect_hdmi_monitor(nv_connector->edid)) {
+               nouveau_hdmi_disconnect(encoder);
+               return;
+       }
+
+       nouveau_hdmi_video_infoframe(encoder, mode);
+       nouveau_hdmi_audio_infoframe(encoder, mode);
+
+       hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+       hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+       hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
+       nv_mask(dev, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+       nv_mask(dev, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+       nv_mask(dev, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+       /* value matches nvidia binary driver, and tegra constant */
+       rekey = 56;
+
+       max_ac_packet  = mode->htotal - mode->hdisplay;
+       max_ac_packet -= rekey;
+       max_ac_packet -= 18; /* constant from tegra */
+       max_ac_packet /= 32;
+
+       /* enable hdmi */
+       hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
+                                             0x1f000000 | /* unknown */
+                                             max_ac_packet << 16 |
+                                             rekey);
+
+       nouveau_audio_mode_set(encoder, mode);
+}
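
max_ac_packet above is derived from the mode's horizontal blanking, minus the rekey constant and a fixed 18, in units of 32 pixels. A worked example for a hypothetical 1920x1080 CEA mode with htotal = 2200 (numbers are illustrative):

/* Illustrative numbers for a 1920x1080 mode, htotal = 2200:
 *   blanking     = 2200 - 1920 = 280
 *   minus rekey  = 280 - 56    = 224
 *   minus 18     = 224 - 18    = 206
 *   / 32         = 6  (integer division)
 * so the 0x0a4 write above ends up with max_ac_packet = 6 and rekey = 56.
 */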
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
new file mode 100644 (file)
index 0000000..6976875
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_HWSQ_H__
+#define __NOUVEAU_HWSQ_H__
+
+struct hwsq_ucode {
+       u8 data[0x200];
+       union {
+               u8  *u08;
+               u16 *u16;
+               u32 *u32;
+       } ptr;
+       u16 len;
+
+       u32 reg;
+       u32 val;
+};
+
+static inline void
+hwsq_init(struct hwsq_ucode *hwsq)
+{
+       hwsq->ptr.u08 = hwsq->data;
+       hwsq->reg = 0xffffffff;
+       hwsq->val = 0xffffffff;
+}
+
+static inline void
+hwsq_fini(struct hwsq_ucode *hwsq)
+{
+       do {
+               *hwsq->ptr.u08++ = 0x7f;
+               hwsq->len = hwsq->ptr.u08 - hwsq->data;
+       } while (hwsq->len & 3);
+       hwsq->ptr.u08 = hwsq->data;
+}
+
+static inline void
+hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
+{
+       u32 shift = 0;
+       while (usec & ~3) {
+               usec >>= 2;
+               shift++;
+       }
+
+       *hwsq->ptr.u08++ = (shift << 2) | usec;
+}
+
+static inline void
+hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
+{
+       flag += 0x80;
+       if (val >= 0)
+               flag += 0x20;
+       if (val >= 1)
+               flag += 0x20;
+       *hwsq->ptr.u08++ = flag;
+}
+
+static inline void
+hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
+{
+       *hwsq->ptr.u08++ = 0x5f;
+       *hwsq->ptr.u08++ = v0;
+       *hwsq->ptr.u08++ = v1;
+}
+
+static inline void
+hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
+{
+       if (val != hwsq->val) {
+               if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
+                       *hwsq->ptr.u08++ = 0x42;
+                       *hwsq->ptr.u16++ = (val & 0x0000ffff);
+               } else {
+                       *hwsq->ptr.u08++ = 0xe2;
+                       *hwsq->ptr.u32++ = val;
+               }
+
+               hwsq->val = val;
+       }
+
+       if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
+               *hwsq->ptr.u08++ = 0x40;
+               *hwsq->ptr.u16++ = (reg & 0x0000ffff);
+       } else {
+               *hwsq->ptr.u08++ = 0xe0;
+               *hwsq->ptr.u32++ = reg;
+       }
+       hwsq->reg = reg;
+}
+
+#endif
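
nouveau_hwsq.h is a small builder for HWSQ microcode: hwsq_wr32() emits register writes, compressing them when the upper halves of the register or value match the previous ones, hwsq_usec() encodes a delay, hwsq_setf() encodes a flag set/clear, and hwsq_fini() pads the script with 0x7f opcodes to a 4-byte boundary. A hedged sketch of assembling a short script; the register addresses, flag number and delay are illustrative, and how the finished ucode gets uploaded is outside this header:

/* Illustrative only: build a tiny HWSQ script on the stack. */
static void example_build_hwsq(void)
{
	struct hwsq_ucode hwsq;

	hwsq_init(&hwsq);
	hwsq_setf(&hwsq, 0x10, 0);		/* clear an (illustrative) flag */
	hwsq_wr32(&hwsq, 0x00100200, 0x00001000);
	hwsq_usec(&hwsq, 8);			/* ~8us delay */
	hwsq_wr32(&hwsq, 0x00100204, 0x00001001);
	hwsq_setf(&hwsq, 0x10, 1);
	hwsq_fini(&hwsq);
	/* hwsq.data[0..hwsq.len) now holds the encoded script */
}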
index d39b220..820ae7f 100644 (file)
 #include "nouveau_i2c.h"
 #include "nouveau_hw.h"
 
+#define T_TIMEOUT  2200000
+#define T_RISEFALL 1000
+#define T_HOLD     5000
+
 static void
-nv04_i2c_setscl(void *data, int state)
+i2c_drive_scl(void *data, int state)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-       uint8_t val;
-
-       val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
-       NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+       struct nouveau_i2c_chan *port = data;
+       if (port->type == 0) {
+               u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
+               if (state) val |= 0x20;
+               else       val &= 0xdf;
+               NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
+       } else
+       if (port->type == 4) {
+               nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
+       } else
+       if (port->type == 5) {
+               if (state) port->state |= 0x01;
+               else       port->state &= 0xfe;
+               nv_wr32(port->dev, port->drive, 4 | port->state);
+       }
 }
 
 static void
-nv04_i2c_setsda(void *data, int state)
+i2c_drive_sda(void *data, int state)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-       uint8_t val;
-
-       val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
-       NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+       struct nouveau_i2c_chan *port = data;
+       if (port->type == 0) {
+               u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
+               if (state) val |= 0x10;
+               else       val &= 0xef;
+               NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
+       } else
+       if (port->type == 4) {
+               nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
+       } else
+       if (port->type == 5) {
+               if (state) port->state |= 0x02;
+               else       port->state &= 0xfd;
+               nv_wr32(port->dev, port->drive, 4 | port->state);
+       }
 }
 
 static int
-nv04_i2c_getscl(void *data)
+i2c_sense_scl(void *data)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-
-       return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
+       struct nouveau_i2c_chan *port = data;
+       struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+       if (port->type == 0) {
+               return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
+       } else
+       if (port->type == 4) {
+               return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
+       } else
+       if (port->type == 5) {
+               if (dev_priv->card_type < NV_D0)
+                       return !!(nv_rd32(port->dev, port->sense) & 0x01);
+               else
+                       return !!(nv_rd32(port->dev, port->sense) & 0x10);
+       }
+       return 0;
 }
 
 static int
-nv04_i2c_getsda(void *data)
+i2c_sense_sda(void *data)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-
-       return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
+       struct nouveau_i2c_chan *port = data;
+       struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+       if (port->type == 0) {
+               return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
+       } else
+       if (port->type == 4) {
+               return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
+       } else
+       if (port->type == 5) {
+               if (dev_priv->card_type < NV_D0)
+                       return !!(nv_rd32(port->dev, port->sense) & 0x02);
+               else
+                       return !!(nv_rd32(port->dev, port->sense) & 0x20);
+       }
+       return 0;
 }
 
 static void
-nv4e_i2c_setscl(void *data, int state)
+i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-       uint8_t val;
-
-       val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
-       nv_wr32(dev, i2c->wr, val | 0x01);
+       udelay((nsec + 500) / 1000);
 }
 
-static void
-nv4e_i2c_setsda(void *data, int state)
+static bool
+i2c_raise_scl(struct nouveau_i2c_chan *port)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-       uint8_t val;
+       u32 timeout = T_TIMEOUT / T_RISEFALL;
+
+       i2c_drive_scl(port, 1);
+       do {
+               i2c_delay(port, T_RISEFALL);
+       } while (!i2c_sense_scl(port) && --timeout);
 
-       val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
-       nv_wr32(dev, i2c->wr, val | 0x01);
+       return timeout != 0;
 }
 
 static int
-nv4e_i2c_getscl(void *data)
+i2c_start(struct nouveau_i2c_chan *port)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
+       int ret = 0;
+
+       port->state  = i2c_sense_scl(port);
+       port->state |= i2c_sense_sda(port) << 1;
+       if (port->state != 3) {
+               i2c_drive_scl(port, 0);
+               i2c_drive_sda(port, 1);
+               if (!i2c_raise_scl(port))
+                       ret = -EBUSY;
+       }
 
-       return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
+       i2c_drive_sda(port, 0);
+       i2c_delay(port, T_HOLD);
+       i2c_drive_scl(port, 0);
+       i2c_delay(port, T_HOLD);
+       return ret;
 }
 
-static int
-nv4e_i2c_getsda(void *data)
+static void
+i2c_stop(struct nouveau_i2c_chan *port)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
-
-       return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
+       i2c_drive_scl(port, 0);
+       i2c_drive_sda(port, 0);
+       i2c_delay(port, T_RISEFALL);
+
+       i2c_drive_scl(port, 1);
+       i2c_delay(port, T_HOLD);
+       i2c_drive_sda(port, 1);
+       i2c_delay(port, T_HOLD);
 }
 
-static const uint32_t nv50_i2c_port[] = {
-       0x00e138, 0x00e150, 0x00e168, 0x00e180,
-       0x00e254, 0x00e274, 0x00e764, 0x00e780,
-       0x00e79c, 0x00e7b8
-};
-#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
-
 static int
-nv50_i2c_getscl(void *data)
+i2c_bitw(struct nouveau_i2c_chan *port, int sda)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
+       i2c_drive_sda(port, sda);
+       i2c_delay(port, T_RISEFALL);
 
-       return !!(nv_rd32(dev, i2c->rd) & 1);
-}
+       if (!i2c_raise_scl(port))
+               return -ETIMEDOUT;
+       i2c_delay(port, T_HOLD);
 
+       i2c_drive_scl(port, 0);
+       i2c_delay(port, T_HOLD);
+       return 0;
+}
 
 static int
-nv50_i2c_getsda(void *data)
+i2c_bitr(struct nouveau_i2c_chan *port)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       struct drm_device *dev = i2c->dev;
+       int sda;
+
+       i2c_drive_sda(port, 1);
+       i2c_delay(port, T_RISEFALL);
 
-       return !!(nv_rd32(dev, i2c->rd) & 2);
+       if (!i2c_raise_scl(port))
+               return -ETIMEDOUT;
+       i2c_delay(port, T_HOLD);
+
+       sda = i2c_sense_sda(port);
+
+       i2c_drive_scl(port, 0);
+       i2c_delay(port, T_HOLD);
+       return sda;
 }
 
-static void
-nv50_i2c_setscl(void *data, int state)
+static int
+i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
 {
-       struct nouveau_i2c_chan *i2c = data;
+       int i, bit;
+
+       *byte = 0;
+       for (i = 7; i >= 0; i--) {
+               bit = i2c_bitr(port);
+               if (bit < 0)
+                       return bit;
+               *byte |= bit << i;
+       }
 
-       nv_wr32(i2c->dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
+       return i2c_bitw(port, last ? 1 : 0);
 }
 
-static void
-nv50_i2c_setsda(void *data, int state)
+static int
+i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
 {
-       struct nouveau_i2c_chan *i2c = data;
+       int i, ret;
+       for (i = 7; i >= 0; i--) {
+               ret = i2c_bitw(port, !!(byte & (1 << i)));
+               if (ret < 0)
+                       return ret;
+       }
 
-       nv_mask(i2c->dev, i2c->wr, 0x00000006, 4 | (state ? 2 : 0));
-       i2c->data = state;
+       ret = i2c_bitr(port);
+       if (ret == 1) /* nack */
+               ret = -EIO;
+       return ret;
 }
 
 static int
-nvd0_i2c_getscl(void *data)
+i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       return !!(nv_rd32(i2c->dev, i2c->rd) & 0x10);
+       u32 addr = msg->addr << 1;
+       if (msg->flags & I2C_M_RD)
+               addr |= 1;
+       return i2c_put_byte(port, addr);
 }
 
 static int
-nvd0_i2c_getsda(void *data)
+i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 {
-       struct nouveau_i2c_chan *i2c = data;
-       return !!(nv_rd32(i2c->dev, i2c->rd) & 0x20);
+       struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
+       struct i2c_msg *msg = msgs;
+       int ret = 0, mcnt = num;
+
+       while (!ret && mcnt--) {
+               u8 remaining = msg->len;
+               u8 *ptr = msg->buf;
+
+               ret = i2c_start(port);
+               if (ret == 0)
+                       ret = i2c_addr(port, msg);
+
+               if (msg->flags & I2C_M_RD) {
+                       while (!ret && remaining--)
+                               ret = i2c_get_byte(port, ptr++, !remaining);
+               } else {
+                       while (!ret && remaining--)
+                               ret = i2c_put_byte(port, *ptr++);
+               }
+
+               msg++;
+       }
+
+       i2c_stop(port);
+       return (ret < 0) ? ret : num;
 }
 
-int
-nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
+static u32
+i2c_bit_func(struct i2c_adapter *adap)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_i2c_chan *i2c;
-       int ret;
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+const struct i2c_algorithm i2c_bit_algo = {
+       .master_xfer = i2c_bit_xfer,
+       .functionality = i2c_bit_func
+};
+
+static const uint32_t nv50_i2c_port[] = {
+       0x00e138, 0x00e150, 0x00e168, 0x00e180,
+       0x00e254, 0x00e274, 0x00e764, 0x00e780,
+       0x00e79c, 0x00e7b8
+};
 
-       if (entry->chan)
-               return -EEXIST;
+static u8 *
+i2c_table(struct drm_device *dev, u8 *version)
+{
+       u8 *dcb = dcb_table(dev), *i2c = NULL;
+       if (dcb) {
+               if (dcb[0] >= 0x15)
+                       i2c = ROMPTR(dev, dcb[2]);
+               if (dcb[0] >= 0x30)
+                       i2c = ROMPTR(dev, dcb[4]);
+       }
 
-       if (dev_priv->card_type >= NV_50 &&
-           dev_priv->card_type <= NV_C0 && entry->read >= NV50_I2C_PORTS) {
-               NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
-               return -EINVAL;
+       /* early revisions had no version number, use dcb version */
+       if (i2c) {
+               *version = dcb[0];
+               if (*version >= 0x30)
+                       *version = i2c[0];
        }
 
-       i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
-       if (i2c == NULL)
-               return -ENOMEM;
-
-       switch (entry->port_type) {
-       case 0:
-               i2c->bit.setsda = nv04_i2c_setsda;
-               i2c->bit.setscl = nv04_i2c_setscl;
-               i2c->bit.getsda = nv04_i2c_getsda;
-               i2c->bit.getscl = nv04_i2c_getscl;
-               i2c->rd = entry->read;
-               i2c->wr = entry->write;
-               break;
-       case 4:
-               i2c->bit.setsda = nv4e_i2c_setsda;
-               i2c->bit.setscl = nv4e_i2c_setscl;
-               i2c->bit.getsda = nv4e_i2c_getsda;
-               i2c->bit.getscl = nv4e_i2c_getscl;
-               i2c->rd = 0x600800 + entry->read;
-               i2c->wr = 0x600800 + entry->write;
-               break;
-       case 5:
-               i2c->bit.setsda = nv50_i2c_setsda;
-               i2c->bit.setscl = nv50_i2c_setscl;
-               if (dev_priv->card_type < NV_D0) {
-                       i2c->bit.getsda = nv50_i2c_getsda;
-                       i2c->bit.getscl = nv50_i2c_getscl;
-                       i2c->rd = nv50_i2c_port[entry->read];
-                       i2c->wr = i2c->rd;
-               } else {
-                       i2c->bit.getsda = nvd0_i2c_getsda;
-                       i2c->bit.getscl = nvd0_i2c_getscl;
-                       i2c->rd = 0x00d014 + (entry->read * 0x20);
-                       i2c->wr = i2c->rd;
-               }
-               break;
-       case 6:
-               i2c->rd = entry->read;
-               i2c->wr = entry->write;
-               break;
-       default:
-               NV_ERROR(dev, "DCB I2C port type %d unknown\n",
-                        entry->port_type);
-               kfree(i2c);
-               return -EINVAL;
+       return i2c;
+}
+
+int
+nouveau_i2c_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nvbios *bios = &dev_priv->vbios;
+       struct nouveau_i2c_chan *port;
+       u8 *i2c, *entry, legacy[2][4] = {};
+       u8 version, entries, recordlen;
+       int ret, i;
+
+       INIT_LIST_HEAD(&dev_priv->i2c_ports);
+
+       i2c = i2c_table(dev, &version);
+       if (!i2c) {
+               u8 *bmp = &bios->data[bios->offset];
+               if (bios->type != NVBIOS_BMP)
+                       return -ENODEV;
+
+               legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
+               legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
+               legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
+               legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
+
+               /* BMP (from v4.0) has i2c info in the structure, it's in a
+                * fixed location on earlier VBIOS
+                */
+               if (bmp[5] < 4)
+                       i2c = &bios->data[0x48];
+               else
+                       i2c = &bmp[0x36];
+
+               if (i2c[4]) legacy[0][0] = i2c[4];
+               if (i2c[5]) legacy[0][1] = i2c[5];
+               if (i2c[6]) legacy[1][0] = i2c[6];
+               if (i2c[7]) legacy[1][1] = i2c[7];
        }
 
-       snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
-                "nouveau-%s-%d", pci_name(dev->pdev), index);
-       i2c->adapter.owner = THIS_MODULE;
-       i2c->adapter.dev.parent = &dev->pdev->dev;
-       i2c->dev = dev;
-       i2c_set_adapdata(&i2c->adapter, i2c);
-
-       if (entry->port_type < 6) {
-               i2c->adapter.algo_data = &i2c->bit;
-               i2c->bit.udelay = 40;
-               i2c->bit.timeout = usecs_to_jiffies(5000);
-               i2c->bit.data = i2c;
-               ret = i2c_bit_add_bus(&i2c->adapter);
+       if (i2c && version >= 0x30) {
+               entry     = i2c[1] + i2c;
+               entries   = i2c[2];
+               recordlen = i2c[3];
+       } else
+       if (i2c) {
+               entry     = i2c;
+               entries   = 16;
+               recordlen = 4;
        } else {
-               i2c->adapter.algo = &nouveau_dp_i2c_algo;
-               ret = i2c_add_adapter(&i2c->adapter);
+               entry     = legacy[0];
+               entries   = 2;
+               recordlen = 4;
        }
 
-       if (ret) {
-               NV_ERROR(dev, "Failed to register i2c %d\n", index);
-               kfree(i2c);
-               return ret;
+       for (i = 0; i < entries; i++, entry += recordlen) {
+               port = kzalloc(sizeof(*port), GFP_KERNEL);
+               if (port == NULL) {
+                       nouveau_i2c_fini(dev);
+                       return -ENOMEM;
+               }
+
+               port->type = entry[3];
+               if (version < 0x30) {
+                       port->type &= 0x07;
+                       if (port->type == 0x07)
+                               port->type = 0xff;
+               }
+
+               if (port->type == 0xff) {
+                       kfree(port);
+                       continue;
+               }
+
+               switch (port->type) {
+               case 0: /* NV04:NV50 */
+                       port->drive = entry[0];
+                       port->sense = entry[1];
+                       port->adapter.algo = &i2c_bit_algo;
+                       break;
+               case 4: /* NV4E */
+                       port->drive = 0x600800 + entry[1];
+                       port->sense = port->drive;
+                       port->adapter.algo = &i2c_bit_algo;
+                       break;
+               case 5: /* NV50- */
+                       port->drive = entry[0] & 0x0f;
+                       if (dev_priv->card_type < NV_D0) {
+                               if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
+                                       break;
+                               port->drive = nv50_i2c_port[port->drive];
+                               port->sense = port->drive;
+                       } else {
+                               port->drive = 0x00d014 + (port->drive * 0x20);
+                               port->sense = port->drive;
+                       }
+                       port->adapter.algo = &i2c_bit_algo;
+                       break;
+               case 6: /* NV50- DP AUX */
+                       port->drive = entry[0];
+                       port->sense = port->drive;
+                       port->adapter.algo = &nouveau_dp_i2c_algo;
+                       break;
+               default:
+                       break;
+               }
+
+               if (!port->adapter.algo) {
+                       NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
+                                i, port->type, port->drive, port->sense);
+                       kfree(port);
+                       continue;
+               }
+
+               snprintf(port->adapter.name, sizeof(port->adapter.name),
+                        "nouveau-%s-%d", pci_name(dev->pdev), i);
+               port->adapter.owner = THIS_MODULE;
+               port->adapter.dev.parent = &dev->pdev->dev;
+               port->dev = dev;
+               port->index = i;
+               port->dcb = ROM32(entry[0]);
+               i2c_set_adapdata(&port->adapter, i2c);
+
+               ret = i2c_add_adapter(&port->adapter);
+               if (ret) {
+                       NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
+                       kfree(port);
+                       continue;
+               }
+
+               list_add_tail(&port->head, &dev_priv->i2c_ports);
        }
 
-       entry->chan = i2c;
        return 0;
 }
 
 void
-nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
+nouveau_i2c_fini(struct drm_device *dev)
 {
-       if (!entry->chan)
-               return;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_i2c_chan *port, *tmp;
 
-       i2c_del_adapter(&entry->chan->adapter);
-       kfree(entry->chan);
-       entry->chan = NULL;
+       list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
+               i2c_del_adapter(&port->adapter);
+               kfree(port);
+       }
 }
 
 struct nouveau_i2c_chan *
-nouveau_i2c_find(struct drm_device *dev, int index)
+nouveau_i2c_find(struct drm_device *dev, u8 index)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
+       struct nouveau_i2c_chan *port;
+
+       if (index == NV_I2C_DEFAULT(0) ||
+           index == NV_I2C_DEFAULT(1)) {
+               u8 version, *i2c = i2c_table(dev, &version);
+               if (i2c && version >= 0x30) {
+                       if (index == NV_I2C_DEFAULT(0))
+                               index = (i2c[4] & 0x0f);
+                       else
+                               index = (i2c[4] & 0xf0) >> 4;
+               } else {
+                       index = 2;
+               }
+       }
 
-       if (index >= DCB_MAX_NUM_I2C_ENTRIES)
-               return NULL;
+       list_for_each_entry(port, &dev_priv->i2c_ports, head) {
+               if (port->index == index)
+                       break;
+       }
 
-       if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) {
-               uint32_t reg = 0xe500, val;
+       if (&port->head == &dev_priv->i2c_ports)
+               return NULL;
 
-               if (i2c->port_type == 6) {
-                       reg += i2c->read * 0x50;
+       if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
+               u32 reg = 0x00e500, val;
+               if (port->type == 6) {
+                       reg += port->drive * 0x50;
                        val  = 0x2002;
                } else {
-                       reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
+                       reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
                        val  = 0xe001;
                }
 
@@ -294,9 +497,7 @@ nouveau_i2c_find(struct drm_device *dev, int index)
                nv_mask(dev, reg + 0x00, 0x0000f003, val);
        }
 
-       if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
-               return NULL;
-       return i2c->chan;
+       return port;
 }
 
 bool
@@ -331,9 +532,13 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
        struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
        int i;
 
-       NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
+       if (!i2c) {
+               NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
+               return -ENODEV;
+       }
 
-       for (i = 0; i2c && info[i].addr; i++) {
+       NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
+       for (i = 0; info[i].addr; i++) {
                if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
                    (!match || match(i2c, &info[i]))) {
                        NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
@@ -342,6 +547,5 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
        }
 
        NV_DEBUG(dev, "No devices found.\n");
-
        return -ENODEV;
 }
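As a rough sketch of how the reworked interface above would be consumed (illustrative only, not part of the patch): callers now look a port up by its DCB i2c index and drive it through the regular Linux i2c core, instead of lazily creating per-DCB-entry channels. The helper below is hypothetical and relies only on nouveau_i2c_find() plus the standard i2c_transfer() call.

/* Hypothetical caller: read one byte from a DDC EEPROM on the given port. */
static int example_ddc_read_byte(struct drm_device *dev, u8 dcb_i2c_index)
{
        struct nouveau_i2c_chan *port = nouveau_i2c_find(dev, dcb_i2c_index);
        u8 offset = 0, value;
        struct i2c_msg msgs[] = {
                { .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
                { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &value },
        };

        if (!port)
                return -ENODEV; /* index not present in dev_priv->i2c_ports */

        if (i2c_transfer(&port->adapter, msgs, 2) != 2)
                return -EIO;

        return value;
}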
index 422b62f..4d2e4e9 100644 (file)
 #include <linux/i2c-algo-bit.h>
 #include "drm_dp_helper.h"
 
-struct dcb_i2c_entry;
+#define NV_I2C_PORT(n)    (0x00 + (n))
+#define NV_I2C_PORT_NUM    0x10
+#define NV_I2C_DEFAULT(n) (0x80 + (n))
 
 struct nouveau_i2c_chan {
        struct i2c_adapter adapter;
        struct drm_device *dev;
-       struct i2c_algo_bit_data bit;
-       unsigned rd;
-       unsigned wr;
-       unsigned data;
+       struct list_head head;
+       u8  index;
+       u8  type;
+       u32 dcb;
+       u32 drive;
+       u32 sense;
+       u32 state;
 };
 
-int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
-void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
-struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
+int  nouveau_i2c_init(struct drm_device *);
+void nouveau_i2c_fini(struct drm_device *);
+struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index);
 bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
 int nouveau_i2c_identify(struct drm_device *dev, const char *what,
                         struct i2c_board_info *info,
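The NV_I2C_DEFAULT() aliases added here are resolved inside nouveau_i2c_find(): on DCB 3.0+ boards the two defaults come from the low and high nibbles of byte 4 of the i2c table, with a fallback to physical port 2 otherwise. A hypothetical consumer simply asks for the alias:

/* Hypothetical: grab the board's primary default DDC bus. */
struct nouveau_i2c_chan *ddc = nouveau_i2c_find(dev, NV_I2C_DEFAULT(0));
if (!ddc)
        return -ENODEV;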
index 36bec48..c3a5745 100644 (file)
@@ -407,6 +407,12 @@ nouveau_mem_vram_init(struct drm_device *dev)
        ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
        if (ret)
                return ret;
+       ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+       if (ret) {
+               /* Reset to default value. */
+               pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+       }
 
        ret = nouveau_ttm_global_init(dev_priv);
        if (ret)
@@ -638,10 +644,10 @@ nouveau_mem_timing_init(struct drm_device *dev)
                        return;
 
                if (P.version == 1)
-                       hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[4]);
+                       hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[4]);
                else
                if (P.version == 2)
-                       hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[8]);
+                       hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[8]);
                else {
                        NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
                }
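The ROMPTR() call-site changes in this file (and across the series) reflect the macro now being keyed off the drm_device rather than the nvbios pointer; the usage pattern itself is unchanged, e.g. (illustrative only):

/* Assumed convention: ROMPTR(dev, offset) returns NULL when the VBIOS
 * table pointer is missing, so callers bail out early.
 */
u8 *hdr = ROMPTR(dev, P.data[4]);
if (!hdr)
        return;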
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
new file mode 100644 (file)
index 0000000..8bccddf
--- /dev/null
@@ -0,0 +1,677 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/acpi.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+#define MXM_DBG(dev, fmt, args...) NV_DEBUG((dev), "MXM: " fmt, ##args)
+#define MXM_MSG(dev, fmt, args...) NV_INFO((dev), "MXM: " fmt, ##args)
+
+static u8 *
+mxms_data(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       return dev_priv->mxms;
+}
+
+static u16
+mxms_version(struct drm_device *dev)
+{
+       u8 *mxms = mxms_data(dev);
+       u16 version = (mxms[4] << 8) | mxms[5];
+       switch (version) {
+       case 0x0200:
+       case 0x0201:
+       case 0x0300:
+               return version;
+       default:
+               break;
+       }
+
+       MXM_DBG(dev, "unknown version %d.%d\n", mxms[4], mxms[5]);
+       return 0x0000;
+}
+
+static u16
+mxms_headerlen(struct drm_device *dev)
+{
+       return 8;
+}
+
+static u16
+mxms_structlen(struct drm_device *dev)
+{
+       return *(u16 *)&mxms_data(dev)[6];
+}
+
+static bool
+mxms_checksum(struct drm_device *dev)
+{
+       u16 size = mxms_headerlen(dev) + mxms_structlen(dev);
+       u8 *mxms = mxms_data(dev), sum = 0;
+       while (size--)
+               sum += *mxms++;
+       if (sum) {
+               MXM_DBG(dev, "checksum invalid\n");
+               return false;
+       }
+       return true;
+}
+
+static bool
+mxms_valid(struct drm_device *dev)
+{
+       u8 *mxms = mxms_data(dev);
+       if (*(u32 *)mxms != 0x5f4d584d) {
+               MXM_DBG(dev, "signature invalid\n");
+               return false;
+       }
+
+       if (!mxms_version(dev) || !mxms_checksum(dev))
+               return false;
+
+       return true;
+}
+
+static bool
+mxms_foreach(struct drm_device *dev, u8 types,
+            bool (*exec)(struct drm_device *, u8 *, void *), void *info)
+{
+       u8 *mxms = mxms_data(dev);
+       u8 *desc = mxms + mxms_headerlen(dev);
+       u8 *fini = desc + mxms_structlen(dev) - 1;
+       while (desc < fini) {
+               u8 type = desc[0] & 0x0f;
+               u8 headerlen = 0;
+               u8 recordlen = 0;
+               u8 entries = 0;
+
+               switch (type) {
+               case 0: /* Output Device Structure */
+                       if (mxms_version(dev) >= 0x0300)
+                               headerlen = 8;
+                       else
+                               headerlen = 6;
+                       break;
+               case 1: /* System Cooling Capability Structure */
+               case 2: /* Thermal Structure */
+               case 3: /* Input Power Structure */
+                       headerlen = 4;
+                       break;
+               case 4: /* GPIO Device Structure */
+                       headerlen = 4;
+                       recordlen = 2;
+                       entries   = (ROM32(desc[0]) & 0x01f00000) >> 20;
+                       break;
+               case 5: /* Vendor Specific Structure */
+                       headerlen = 8;
+                       break;
+               case 6: /* Backlight Control Structure */
+                       if (mxms_version(dev) >= 0x0300) {
+                               headerlen = 4;
+                               recordlen = 8;
+                               entries   = (desc[1] & 0xf0) >> 4;
+                       } else {
+                               headerlen = 8;
+                       }
+                       break;
+               case 7: /* Fan Control Structure */
+                       headerlen = 8;
+                       recordlen = 4;
+                       entries   = desc[1] & 0x07;
+                       break;
+               default:
+                       MXM_DBG(dev, "unknown descriptor type %d\n", type);
+                       return false;
+               }
+
+               if ((drm_debug & DRM_UT_DRIVER) && (exec == NULL)) {
+                       static const char * mxms_desc_name[] = {
+                               "ODS", "SCCS", "TS", "IPS",
+                               "GSD", "VSS", "BCS", "FCS",
+                       };
+                       u8 *dump = desc;
+                       int i, j;
+
+                       MXM_DBG(dev, "%4s: ", mxms_desc_name[type]);
+                       for (j = headerlen - 1; j >= 0; j--)
+                               printk("%02x", dump[j]);
+                       printk("\n");
+                       dump += headerlen;
+
+                       for (i = 0; i < entries; i++, dump += recordlen) {
+                               MXM_DBG(dev, "      ");
+                               for (j = recordlen - 1; j >= 0; j--)
+                                       printk("%02x", dump[j]);
+                               printk("\n");
+                       }
+               }
+
+               if (types & (1 << type)) {
+                       if (!exec(dev, desc, info))
+                               return false;
+               }
+
+               desc += headerlen + (entries * recordlen);
+       }
+
+       return true;
+}
+
+static u8 *
+mxm_table(struct drm_device *dev, u8 *size)
+{
+       struct bit_entry x;
+
+       if (bit_table(dev, 'x', &x)) {
+               MXM_DBG(dev, "BIT 'x' table not present\n");
+               return NULL;
+       }
+
+       if (x.version != 1 || x.length < 3) {
+               MXM_MSG(dev, "BIT x table %d/%d unknown\n",
+                       x.version, x.length);
+               return NULL;
+       }
+
+       *size = x.length;
+       return x.data;
+}
+
+/* These map MXM v2.x digital connection values to the appropriate SOR/link,
+ * hopefully they're correct for all boards within the same chipset...
+ *
+ * MXM v3.x VBIOS are nicer and provide pointers to these tables.
+ */
+static u8 nv84_sor_map[16] = {
+       0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv92_sor_map[16] = {
+       0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+       0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv94_sor_map[16] = {
+       0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
+       0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv96_sor_map[16] = {
+       0x00, 0x14, 0x24, 0x00, 0x34, 0x00, 0x11, 0x31,
+       0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv98_sor_map[16] = {
+       0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
+       0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8
+mxm_sor_map(struct drm_device *dev, u8 conn)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u8 len, *mxm = mxm_table(dev, &len);
+       if (mxm && len >= 6) {
+               u8 *map = ROMPTR(dev, mxm[4]);
+               if (map) {
+                       if (map[0] == 0x10) {
+                               if (conn < map[3])
+                                       return map[map[1] + conn];
+                               return 0x00;
+                       }
+
+                       MXM_MSG(dev, "unknown sor map 0x%02x\n", map[0]);
+               }
+       }
+
+       if (dev_priv->chipset == 0x84 || dev_priv->chipset == 0x86)
+               return nv84_sor_map[conn];
+       if (dev_priv->chipset == 0x92)
+               return nv92_sor_map[conn];
+       if (dev_priv->chipset == 0x94)
+               return nv94_sor_map[conn];
+       if (dev_priv->chipset == 0x96)
+               return nv96_sor_map[conn];
+       if (dev_priv->chipset == 0x98)
+               return nv98_sor_map[conn];
+
+       MXM_MSG(dev, "missing sor map\n");
+       return 0x00;
+}
+
+static u8
+mxm_ddc_map(struct drm_device *dev, u8 port)
+{
+       u8 len, *mxm = mxm_table(dev, &len);
+       if (mxm && len >= 8) {
+               u8 *map = ROMPTR(dev, mxm[6]);
+               if (map) {
+                       if (map[0] == 0x10) {
+                               if (port < map[3])
+                                       return map[map[1] + port];
+                               return 0x00;
+                       }
+
+                       MXM_MSG(dev, "unknown ddc map 0x%02x\n", map[0]);
+               }
+       }
+
+       /* v2.x: directly write port as dcb i2cidx */
+       return (port << 4) | port;
+}
+
+struct mxms_odev {
+       u8 outp_type;
+       u8 conn_type;
+       u8 ddc_port;
+       u8 dig_conn;
+};
+
+static void
+mxms_output_device(struct drm_device *dev, u8 *pdata, struct mxms_odev *desc)
+{
+       u64 data = ROM32(pdata[0]);
+       if (mxms_version(dev) >= 0x0300)
+               data |= (u64)ROM16(pdata[4]) << 32;
+
+       desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
+       desc->ddc_port  = (data & 0x0000000000000f00ULL) >> 8;
+       desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
+       desc->dig_conn  = (data & 0x0000000000780000ULL) >> 19;
+}
+
+struct context {
+       u32 *outp;
+       struct mxms_odev desc;
+};
+
+static bool
+mxm_match_tmds_partner(struct drm_device *dev, u8 *data, void *info)
+{
+       struct context *ctx = info;
+       struct mxms_odev desc;
+
+       mxms_output_device(dev, data, &desc);
+       if (desc.outp_type == 2 &&
+           desc.dig_conn == ctx->desc.dig_conn)
+               return false;
+       return true;
+}
+
+static bool
+mxm_match_dcb(struct drm_device *dev, u8 *data, void *info)
+{
+       struct context *ctx = info;
+       u64 desc = *(u64 *)data;
+
+       mxms_output_device(dev, data, &ctx->desc);
+
+       /* match dcb encoder type to mxm-ods device type */
+       if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
+               return true;
+
+       /* digital output, have some extra stuff to match here, there's a
+        * table in the vbios that provides a mapping from the mxm digital
+        * connection enum values to SOR/link
+        */
+       if ((desc & 0x00000000000000f0) >= 0x20) {
+               /* check against sor index */
+               u8 link = mxm_sor_map(dev, ctx->desc.dig_conn);
+               if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
+                       return true;
+
+               /* check dcb entry has a compatible link field */
+               link = (link & 0x30) >> 4;
+               if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
+                       return true;
+       }
+
+       /* mark this descriptor accounted for by setting invalid device type,
+        * except of course some manufacturers don't follow specs properly and
+        * we need to avoid killing off the TMDS function on DP connectors
+        * if MXM-SIS is missing an entry for it.
+        */
+       data[0] &= ~0xf0;
+       if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
+           mxms_foreach(dev, 0x01, mxm_match_tmds_partner, ctx)) {
+               data[0] |= 0x20; /* modify descriptor to match TMDS now */
+       } else {
+               data[0] |= 0xf0;
+       }
+
+       return false;
+}
+
+static int
+mxm_dcb_sanitise_entry(struct drm_device *dev, void *data, int idx, u8 *dcbe)
+{
+       struct context ctx = { .outp = (u32 *)dcbe };
+       u8 type, i2cidx, link;
+       u8 *conn;
+
+       /* look for an output device structure that matches this dcb entry.
+        * if one isn't found, disable it.
+        */
+       if (mxms_foreach(dev, 0x01, mxm_match_dcb, &ctx)) {
+               MXM_DBG(dev, "disable %d: 0x%08x 0x%08x\n",
+                       idx, ctx.outp[0], ctx.outp[1]);
+               ctx.outp[0] |= 0x0000000f;
+               return 0;
+       }
+
+       /* modify the output's ddc/aux port, there's a pointer to a table
+        * with the mapping from mxm ddc/aux port to dcb i2c_index in the
+        * vbios mxm table
+        */
+       i2cidx = mxm_ddc_map(dev, ctx.desc.ddc_port);
+       if ((ctx.outp[0] & 0x0000000f) != OUTPUT_DP)
+               i2cidx = (i2cidx & 0x0f) << 4;
+       else
+               i2cidx = (i2cidx & 0xf0);
+
+       if (i2cidx != 0xf0) {
+               ctx.outp[0] &= ~0x000000f0;
+               ctx.outp[0] |= i2cidx;
+       }
+
+       /* override dcb sorconf.link, based on what mxm data says */
+       switch (ctx.desc.outp_type) {
+       case 0x00: /* Analog CRT */
+       case 0x01: /* Analog TV/HDTV */
+               break;
+       default:
+               link = mxm_sor_map(dev, ctx.desc.dig_conn) & 0x30;
+               ctx.outp[1] &= ~0x00000030;
+               ctx.outp[1] |= link;
+               break;
+       }
+
+       /* we may need to fixup various other vbios tables based on what
+        * the descriptor says the connector type should be.
+        *
+        * in a lot of cases, the vbios tables will claim DVI-I is possible,
+        * and the mxm data says the connector is really HDMI.  another
+        * common example is DP->eDP.
+        */
+       conn = dcb_conn(dev, (ctx.outp[0] & 0x0000f000) >> 12);
+       type = conn[0];
+       switch (ctx.desc.conn_type) {
+       case 0x01: /* LVDS */
+               ctx.outp[1] |= 0x00000004; /* use_power_scripts */
+               /* XXX: modify default link width in LVDS table */
+               break;
+       case 0x02: /* HDMI */
+               type = DCB_CONNECTOR_HDMI_1;
+               break;
+       case 0x03: /* DVI-D */
+               type = DCB_CONNECTOR_DVI_D;
+               break;
+       case 0x0e: /* eDP, falls through to DPint */
+               ctx.outp[1] |= 0x00010000;
+       case 0x07: /* DP internal, wtf is this?? HP8670w */
+               ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
+               type = DCB_CONNECTOR_eDP;
+               break;
+       default:
+               break;
+       }
+
+       if (mxms_version(dev) >= 0x0300)
+               conn[0] = type;
+
+       return 0;
+}
+
+static bool
+mxm_show_unmatched(struct drm_device *dev, u8 *data, void *info)
+{
+       u64 desc = *(u64 *)data;
+       if ((desc & 0xf0) != 0xf0)
+               MXM_MSG(dev, "unmatched output device 0x%016llx\n", desc);
+       return true;
+}
+
+static void
+mxm_dcb_sanitise(struct drm_device *dev)
+{
+       u8 *dcb = dcb_table(dev);
+       if (!dcb || dcb[0] != 0x40) {
+               MXM_DBG(dev, "unsupported DCB version\n");
+               return;
+       }
+
+       dcb_outp_foreach(dev, NULL, mxm_dcb_sanitise_entry);
+       mxms_foreach(dev, 0x01, mxm_show_unmatched, NULL);
+}
+
+static bool
+mxm_shadow_rom_fetch(struct nouveau_i2c_chan *i2c, u8 addr,
+                    u8 offset, u8 size, u8 *data)
+{
+       struct i2c_msg msgs[] = {
+               { .addr = addr, .flags = 0, .len = 1, .buf = &offset },
+               { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
+       };
+
+       return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+}
+
+static bool
+mxm_shadow_rom(struct drm_device *dev, u8 version)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_i2c_chan *i2c = NULL;
+       u8 i2cidx, mxms[6], addr, size;
+
+       i2cidx = mxm_ddc_map(dev, 1 /* LVDS_DDC */) & 0x0f;
+       if (i2cidx < 0x0f)
+               i2c = nouveau_i2c_find(dev, i2cidx);
+       if (!i2c)
+               return false;
+
+       addr = 0x54;
+       if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms)) {
+               addr = 0x56;
+               if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms))
+                       return false;
+       }
+
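+       /* temporarily point dev_priv->mxms at the on-stack header bytes so
+        * mxms_headerlen()/mxms_structlen() can size the structure before it
+        * is shadowed into a kmalloc'd buffer below
+        */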
+       dev_priv->mxms = mxms;
+       size = mxms_headerlen(dev) + mxms_structlen(dev);
+       dev_priv->mxms = kmalloc(size, GFP_KERNEL);
+
+       if (dev_priv->mxms &&
+           mxm_shadow_rom_fetch(i2c, addr, 0, size, dev_priv->mxms))
+               return true;
+
+       kfree(dev_priv->mxms);
+       dev_priv->mxms = NULL;
+       return false;
+}
+
+#if defined(CONFIG_ACPI)
+static bool
+mxm_shadow_dsm(struct drm_device *dev, u8 version)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       static char muid[] = {
+               0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
+               0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
+       };
+       u32 mxms_args[] = { 0x00000000 };
+       union acpi_object args[4] = {
+               /* _DSM MUID */
+               { .buffer.type = 3,
+                 .buffer.length = sizeof(muid),
+                 .buffer.pointer = muid,
+               },
+               /* spec says this can be zero to mean "highest revision", but
+                * of course there's at least one bios out there which fails
+                * unless you pass in exactly the version it supports..
+                */
+               { .integer.type = ACPI_TYPE_INTEGER,
+                 .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
+               },
+               /* MXMS function */
+               { .integer.type = ACPI_TYPE_INTEGER,
+                 .integer.value = 0x00000010,
+               },
+               /* Pointer to MXMS arguments */
+               { .buffer.type = ACPI_TYPE_BUFFER,
+                 .buffer.length = sizeof(mxms_args),
+                 .buffer.pointer = (char *)mxms_args,
+               },
+       };
+       struct acpi_object_list list = { ARRAY_SIZE(args), args };
+       struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj;
+       acpi_handle handle;
+       int ret;
+
+       handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+       if (!handle)
+               return false;
+
+       ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
+       if (ret) {
+               MXM_DBG(dev, "DSM MXMS failed: %d\n", ret);
+               return false;
+       }
+
+       obj = retn.pointer;
+       if (obj->type == ACPI_TYPE_BUFFER) {
+               dev_priv->mxms = kmemdup(obj->buffer.pointer,
+                                        obj->buffer.length, GFP_KERNEL);
+       } else
+       if (obj->type == ACPI_TYPE_INTEGER) {
+               MXM_DBG(dev, "DSM MXMS returned 0x%llx\n", obj->integer.value);
+       }
+
+       kfree(obj);
+       return dev_priv->mxms != NULL;
+}
+#endif
+
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+
+#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
+
+static bool
+mxm_shadow_wmi(struct drm_device *dev, u8 version)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
+       struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
+       struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj;
+       acpi_status status;
+
+       if (!wmi_has_guid(WMI_WMMX_GUID))
+               return false;
+
+       status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
+       if (ACPI_FAILURE(status)) {
+               MXM_DBG(dev, "WMMX MXMS returned %d\n", status);
+               return false;
+       }
+
+       obj = retn.pointer;
+       if (obj->type == ACPI_TYPE_BUFFER) {
+               dev_priv->mxms = kmemdup(obj->buffer.pointer,
+                                        obj->buffer.length, GFP_KERNEL);
+       }
+
+       kfree(obj);
+       return dev_priv->mxms != NULL;
+}
+#endif
+
+struct mxm_shadow_h {
+       const char *name;
+       bool (*exec)(struct drm_device *, u8 version);
+} _mxm_shadow[] = {
+       { "ROM", mxm_shadow_rom },
+#if defined(CONFIG_ACPI)
+       { "DSM", mxm_shadow_dsm },
+#endif
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+       { "WMI", mxm_shadow_wmi },
+#endif
+       {}
+};
+
+static int
+mxm_shadow(struct drm_device *dev, u8 version)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct mxm_shadow_h *shadow = _mxm_shadow;
+       do {
+               MXM_DBG(dev, "checking %s\n", shadow->name);
+               if (shadow->exec(dev, version)) {
+                       if (mxms_valid(dev))
+                               return 0;
+                       kfree(dev_priv->mxms);
+                       dev_priv->mxms = NULL;
+               }
+       } while ((++shadow)->name);
+       return -ENOENT;
+}
+
+int
+nouveau_mxm_init(struct drm_device *dev)
+{
+       u8 mxm_size, *mxm = mxm_table(dev, &mxm_size);
+       if (!mxm || !mxm[0]) {
+               MXM_MSG(dev, "no VBIOS data, nothing to do\n");
+               return 0;
+       }
+
+       MXM_MSG(dev, "BIOS version %d.%d\n", mxm[0] >> 4, mxm[0] & 0x0f);
+
+       if (mxm_shadow(dev, mxm[0])) {
+               MXM_MSG(dev, "failed to locate valid SIS\n");
+               return -EINVAL;
+       }
+
+       MXM_MSG(dev, "MXMS Version %d.%d\n",
+               mxms_version(dev) >> 8, mxms_version(dev) & 0xff);
+       mxms_foreach(dev, 0, NULL, NULL);
+
+       if (nouveau_mxmdcb)
+               mxm_dcb_sanitise(dev);
+       return 0;
+}
+
+void
+nouveau_mxm_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       kfree(dev_priv->mxms);
+       dev_priv->mxms = NULL;
+}
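For orientation, the descriptor walker above (mxms_foreach) takes a bitmask of descriptor types plus an optional callback; returning false from the callback aborts the walk, and passing a NULL callback merely dumps the structures when DRM debugging is enabled. A hypothetical use, assuming the MXMS has already been shadowed:

/* Hypothetical callback: count GPIO Device Structures (descriptor type 4). */
static bool example_count_gsd(struct drm_device *dev, u8 *desc, void *info)
{
        int *count = info;
        (*count)++;
        return true;    /* keep walking */
}

static int example_gsd_count(struct drm_device *dev)
{
        int count = 0;
        mxms_foreach(dev, 0x10, example_count_gsd, &count); /* 1 << 4 = type 4 */
        return count;
}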
index 6abdbe6..2ef883c 100644 (file)
@@ -115,7 +115,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *nobj = NULL;
        struct drm_mm_node *mem;
-       uint32_t offset;
+       uint64_t offset;
        int target, ret;
 
        mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
index 960c0ae..cc419fa 100644 (file)
@@ -723,14 +723,14 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
        nv_wo32(chan->ramin, 0x020c, 0x000000ff);
 
        /* map display semaphore buffers into channel's vm */
-       if (dev_priv->card_type >= NV_D0)
-               return 0;
-
-       for (i = 0; i < 2; i++) {
-               struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
-
-               ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
-                                        &chan->dispc_vma[i]);
+       for (i = 0; i < dev->mode_config.num_crtc; i++) {
+               struct nouveau_bo *bo;
+               if (dev_priv->card_type >= NV_D0)
+                       bo = nvd0_display_crtc_sema(dev, i);
+               else
+                       bo = nv50_display(dev)->crtc[i].sem.bo;
+
+               ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
                if (ret)
                        return ret;
        }
@@ -879,9 +879,14 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 
        NV_DEBUG(dev, "ch%d\n", chan->id);
 
-       if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) {
+       if (dev_priv->card_type >= NV_D0) {
+               for (i = 0; i < dev->mode_config.num_crtc; i++) {
+                       struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+                       nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
+               }
+       } else
+       if (dev_priv->card_type >= NV_50) {
                struct nv50_display *disp = nv50_display(dev);
-
                for (i = 0; i < dev->mode_config.num_crtc; i++) {
                        struct nv50_display_crtc *dispc = &disp->crtc[i];
                        nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
index 33d03fb..58f4973 100644 (file)
@@ -41,7 +41,7 @@ legacy_perf_init(struct drm_device *dev)
                return;
        }
 
-       perf = ROMPTR(bios, bmp[0x73]);
+       perf = ROMPTR(dev, bmp[0x73]);
        if (!perf) {
                NV_DEBUG(dev, "No memclock table pointer found.\n");
                return;
@@ -87,7 +87,7 @@ nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
         * ramcfg to select the correct subentry
         */
        if (P->version == 2) {
-               u8 *tmap = ROMPTR(bios, P->data[4]);
+               u8 *tmap = ROMPTR(dev, P->data[4]);
                if (!tmap) {
                        NV_DEBUG(dev, "no timing map pointer\n");
                        return NULL;
@@ -140,7 +140,6 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
                     struct nouveau_pm_level *perflvl)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nvbios *bios = &dev_priv->vbios;
        u8 *vmap;
        int id;
 
@@ -165,7 +164,7 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
                return;
        }
 
-       vmap = ROMPTR(bios, P->data[32]);
+       vmap = ROMPTR(dev, P->data[32]);
        if (!vmap) {
                NV_DEBUG(dev, "volt map table pointer invalid\n");
                return;
@@ -200,12 +199,14 @@ nouveau_perf_init(struct drm_device *dev)
                        return;
                }
 
-               perf = ROMPTR(bios, P.data[0]);
+               perf = ROMPTR(dev, P.data[0]);
                version   = perf[0];
                headerlen = perf[1];
                if (version < 0x40) {
                        recordlen = perf[3] + (perf[4] * perf[5]);
                        entries   = perf[2];
+
+                       pm->pwm_divisor = ROM16(perf[6]);
                } else {
                        recordlen = perf[2] + (perf[3] * perf[4]);
                        entries   = perf[5];
@@ -216,7 +217,7 @@ nouveau_perf_init(struct drm_device *dev)
                        return;
                }
 
-               perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
+               perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
                if (!perf) {
                        NV_DEBUG(dev, "perf table pointer invalid\n");
                        return;
@@ -283,7 +284,6 @@ nouveau_perf_init(struct drm_device *dev)
                                perflvl->memory = ROM16(entry[11]) * 1000;
                        else
                                perflvl->memory = ROM16(entry[11]) * 2000;
-
                        break;
                case 0x25:
                        perflvl->fanspeed = entry[4];
@@ -300,8 +300,8 @@ nouveau_perf_init(struct drm_device *dev)
                        perflvl->core = ROM16(entry[8]) * 1000;
                        perflvl->shader = ROM16(entry[10]) * 1000;
                        perflvl->memory = ROM16(entry[12]) * 1000;
-                       /*XXX: confirm on 0x35 */
-                       perflvl->unk05 = ROM16(entry[16]) * 1000;
+                       perflvl->vdec = ROM16(entry[16]) * 1000;
+                       perflvl->dom6 = ROM16(entry[20]) * 1000;
                        break;
                case 0x40:
 #define subent(n) (ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000
index a539fd2..9064d7f 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
+#include "nouveau_gpio.h"
 
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
 #include <linux/hwmon-sysfs.h>
 
 static int
-nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl,
-                    u8 id, u32 khz)
+nouveau_pwmfan_get(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-       void *pre_state;
+       struct gpio_func gpio;
+       u32 divs, duty;
+       int ret;
 
-       if (khz == 0)
-               return 0;
+       if (!pm->pwm_get)
+               return -ENODEV;
+
+       ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
+       if (ret == 0) {
+               ret = pm->pwm_get(dev, gpio.line, &divs, &duty);
+               if (ret == 0) {
+                       divs = max(divs, duty);
+                       if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
+                               duty = divs - duty;
+                       return (duty * 100) / divs;
+               }
+
+               return nouveau_gpio_func_get(dev, gpio.func) * 100;
+       }
+
+       return -ENODEV;
+}
+
+static int
+nouveau_pwmfan_set(struct drm_device *dev, int percent)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct gpio_func gpio;
+       u32 divs, duty;
+       int ret;
+
+       if (!pm->pwm_set)
+               return -ENODEV;
+
+       ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
+       if (ret == 0) {
+               divs = pm->pwm_divisor;
+               if (pm->fan.pwm_freq) {
+                       /*XXX: PNVIO clock more than likely... */
+                       divs = 135000 / pm->fan.pwm_freq;
+                       if (dev_priv->chipset < 0xa3)
+                               divs /= 4;
+               }
+
+               duty = ((divs * percent) + 99) / 100;
+               if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
+                       duty = divs - duty;
 
-       pre_state = pm->clock_pre(dev, perflvl, id, khz);
-       if (IS_ERR(pre_state))
-               return PTR_ERR(pre_state);
+               return pm->pwm_set(dev, gpio.line, divs, duty);
+       }
+
+       return -ENODEV;
+}
+
+static int
+nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+                      struct nouveau_pm_level *a, struct nouveau_pm_level *b)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       int ret;
+
+       /*XXX: not on all boards, we should control based on temperature
+        *     on recent boards..  or maybe on some other factor we don't
+        *     know about?
+        */
+       if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
+               ret = nouveau_pwmfan_set(dev, perflvl->fanspeed);
+               if (ret && ret != -ENODEV) {
+                       NV_ERROR(dev, "fanspeed set failed: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       if (pm->voltage.supported && pm->voltage_set) {
+               if (perflvl->volt_min && b->volt_min > a->volt_min) {
+                       ret = pm->voltage_set(dev, perflvl->volt_min);
+                       if (ret) {
+                               NV_ERROR(dev, "voltage set failed: %d\n", ret);
+                               return ret;
+                       }
+               }
+       }
 
-       if (pre_state)
-               pm->clock_set(dev, pre_state);
        return 0;
 }
 
@@ -59,31 +133,24 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       void *state;
        int ret;
 
        if (perflvl == pm->cur)
                return 0;
 
-       if (pm->voltage.supported && pm->voltage_set && perflvl->volt_min) {
-               ret = pm->voltage_set(dev, perflvl->volt_min);
-               if (ret) {
-                       NV_ERROR(dev, "voltage_set %d failed: %d\n",
-                                perflvl->volt_min, ret);
-               }
-       }
+       ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
+       if (ret)
+               return ret;
 
-       if (pm->clocks_pre) {
-               void *state = pm->clocks_pre(dev, perflvl);
-               if (IS_ERR(state))
-                       return PTR_ERR(state);
-               pm->clocks_set(dev, state);
-       } else
-       if (pm->clock_set) {
-               nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
-               nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
-               nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
-               nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
-       }
+       state = pm->clocks_pre(dev, perflvl);
+       if (IS_ERR(state))
+               return PTR_ERR(state);
+       pm->clocks_set(dev, state);
+
+       ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
+       if (ret)
+               return ret;
 
        pm->cur = perflvl;
        return 0;
@@ -130,28 +197,9 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 
        memset(perflvl, 0, sizeof(*perflvl));
 
-       if (pm->clocks_get) {
-               ret = pm->clocks_get(dev, perflvl);
-               if (ret)
-                       return ret;
-       } else
-       if (pm->clock_get) {
-               ret = pm->clock_get(dev, PLL_CORE);
-               if (ret > 0)
-                       perflvl->core = ret;
-
-               ret = pm->clock_get(dev, PLL_MEMORY);
-               if (ret > 0)
-                       perflvl->memory = ret;
-
-               ret = pm->clock_get(dev, PLL_SHADER);
-               if (ret > 0)
-                       perflvl->shader = ret;
-
-               ret = pm->clock_get(dev, PLL_UNK05);
-               if (ret > 0)
-                       perflvl->unk05 = ret;
-       }
+       ret = pm->clocks_get(dev, perflvl);
+       if (ret)
+               return ret;
 
        if (pm->voltage.supported && pm->voltage_get) {
                ret = pm->voltage_get(dev);
@@ -161,6 +209,10 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
                }
        }
 
+       ret = nouveau_pwmfan_get(dev);
+       if (ret > 0)
+               perflvl->fanspeed = ret;
+
        return 0;
 }
 
@@ -412,6 +464,172 @@ static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
                                                nouveau_hwmon_show_update_rate,
                                                NULL, 0);
 
+static ssize_t
+nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
+                             char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+       struct gpio_func gpio;
+       u32 cycles, cur, prev;
+       u64 start;
+       int ret;
+
+       ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio);
+       if (ret)
+               return ret;
+
+       /* Monitor the GPIO input 0x3b for 250ms.
+        * When the fan spins, it changes the value of GPIO FAN_SENSE.
+        * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation.
+        */
+       start = ptimer->read(dev);
+       prev = nouveau_gpio_sense(dev, 0, gpio.line);
+       cycles = 0;
+       do {
+               cur = nouveau_gpio_sense(dev, 0, gpio.line);
+               if (prev != cur) {
+                       cycles++;
+                       prev = cur;
+               }
+
+               usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
+       } while (ptimer->read(dev) - start < 250000000);
+
+       /* interpolate to get rpm */
+       return sprintf(buf, "%i\n", cycles / 4 * 4 * 60);
+}
+static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
+                         NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       int ret;
+
+       ret = nouveau_pwmfan_get(dev);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
+                      const char *buf, size_t count)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       int ret = -ENODEV;
+       long value;
+
+       if (nouveau_perflvl_wr != 7777)
+               return -EPERM;
+
+       if (strict_strtol(buf, 10, &value) == -EINVAL)
+               return -EINVAL;
+
+       if (value < pm->fan.min_duty)
+               value = pm->fan.min_duty;
+       if (value > pm->fan.max_duty)
+               value = pm->fan.max_duty;
+
+       ret = nouveau_pwmfan_set(dev, value);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR,
+                         nouveau_hwmon_get_pwm0,
+                         nouveau_hwmon_set_pwm0, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0_min(struct device *d,
+                          struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+       return sprintf(buf, "%i\n", pm->fan.min_duty);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a,
+                          const char *buf, size_t count)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       long value;
+
+       if (strict_strtol(buf, 10, &value) == -EINVAL)
+               return -EINVAL;
+
+       if (value < 0)
+               value = 0;
+
+       if (pm->fan.max_duty - value < 10)
+               value = pm->fan.max_duty - 10;
+
+       if (value < 10)
+               pm->fan.min_duty = 10;
+       else
+               pm->fan.min_duty = value;
+
+       return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR,
+                         nouveau_hwmon_get_pwm0_min,
+                         nouveau_hwmon_set_pwm0_min, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0_max(struct device *d,
+                          struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+       return sprintf(buf, "%i\n", pm->fan.max_duty);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a,
+                          const char *buf, size_t count)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       long value;
+
+       if (strict_strtol(buf, 10, &value) == -EINVAL)
+               return -EINVAL;
+
+       if (value < 0)
+               value = 0;
+
+       if (value - pm->fan.min_duty < 10)
+               value = pm->fan.min_duty + 10;
+
+       if (value > 100)
+               pm->fan.max_duty = 100;
+       else
+               pm->fan.max_duty = value;
+
+       return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR,
+                         nouveau_hwmon_get_pwm0_max,
+                         nouveau_hwmon_set_pwm0_max, 0);
+
 static struct attribute *hwmon_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -420,20 +638,36 @@ static struct attribute *hwmon_attributes[] = {
        &sensor_dev_attr_update_rate.dev_attr.attr,
        NULL
 };
+static struct attribute *hwmon_fan_rpm_attributes[] = {
+       &sensor_dev_attr_fan0_input.dev_attr.attr,
+       NULL
+};
+static struct attribute *hwmon_pwm_fan_attributes[] = {
+       &sensor_dev_attr_pwm0.dev_attr.attr,
+       &sensor_dev_attr_pwm0_min.dev_attr.attr,
+       &sensor_dev_attr_pwm0_max.dev_attr.attr,
+       NULL
+};
 
 static const struct attribute_group hwmon_attrgroup = {
        .attrs = hwmon_attributes,
 };
+static const struct attribute_group hwmon_fan_rpm_attrgroup = {
+       .attrs = hwmon_fan_rpm_attributes,
+};
+static const struct attribute_group hwmon_pwm_fan_attrgroup = {
+       .attrs = hwmon_pwm_fan_attributes,
+};
 #endif
 
 static int
 nouveau_hwmon_init(struct drm_device *dev)
 {
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
        struct device *hwmon_dev;
-       int ret;
+       int ret = 0;
 
        if (!pm->temp_get)
                return -ENODEV;
@@ -446,17 +680,46 @@ nouveau_hwmon_init(struct drm_device *dev)
                return ret;
        }
        dev_set_drvdata(hwmon_dev, dev);
+
+       /* default sysfs entries */
        ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
        if (ret) {
-               NV_ERROR(dev,
-                       "Unable to create hwmon sysfs file: %d\n", ret);
-               hwmon_device_unregister(hwmon_dev);
-               return ret;
+               goto error;
+       }
+
+       /* if the card has a pwm fan */
+       /*XXX: incorrect, need better detection for this, some boards have
+        *     the gpio entries for pwm fan control even when there's no
+        *     actual fan connected to it... therm table? */
+       if (nouveau_pwmfan_get(dev) >= 0) {
+               ret = sysfs_create_group(&dev->pdev->dev.kobj,
+                                        &hwmon_pwm_fan_attrgroup);
+               if (ret)
+                       goto error;
+       }
+
+       /* if the card can read the fan rpm */
+       if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) {
+               ret = sysfs_create_group(&dev->pdev->dev.kobj,
+                                        &hwmon_fan_rpm_attrgroup);
+               if (ret)
+                       goto error;
        }
 
        pm->hwmon = hwmon_dev;
-#endif
+
+       return 0;
+
+error:
+       NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret);
+       hwmon_device_unregister(hwmon_dev);
+       pm->hwmon = NULL;
+       return ret;
+#else
+       pm->hwmon = NULL;
        return 0;
+#endif
 }
 
 static void
@@ -468,6 +731,9 @@ nouveau_hwmon_fini(struct drm_device *dev)
 
        if (pm->hwmon) {
                sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
+               sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_pwm_fan_attrgroup);
+               sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_fan_rpm_attrgroup);
+
                hwmon_device_unregister(pm->hwmon);
        }
 #endif
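The duty-cycle handling in nouveau_pwmfan_get()/nouveau_pwmfan_set() above reduces to a percentage-to-ticks conversion that is inverted on NV40-and-earlier boards or when the GPIO log indicates an active-low line (gpio.log[0] & 1). A minimal sketch of that arithmetic, with illustrative numbers only:

/* e.g. divs = 100, percent = 40: duty = (100 * 40 + 99) / 100 = 40 ticks;
 * on an inverted line this is programmed as 100 - 40 = 60.
 */
static u32 example_pwm_duty(u32 divs, int percent, bool inverted)
{
        u32 duty = ((divs * percent) + 99) / 100;       /* round up */
        return inverted ? divs - duty : duty;
}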
index 8ac02cd..2f8e14f 100644 (file)
@@ -47,29 +47,33 @@ void nouveau_mem_timing_init(struct drm_device *);
 void nouveau_mem_timing_fini(struct drm_device *);
 
 /* nv04_pm.c */
-int nv04_pm_clock_get(struct drm_device *, u32 id);
-void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
-                       u32 id, int khz);
-void nv04_pm_clock_set(struct drm_device *, void *);
+int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv04_pm_clocks_set(struct drm_device *, void *);
 
 /* nv40_pm.c */
 int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
 void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-void nv40_pm_clocks_set(struct drm_device *, void *);
+int nv40_pm_clocks_set(struct drm_device *, void *);
+int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
+int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
 
 /* nv50_pm.c */
-int nv50_pm_clock_get(struct drm_device *, u32 id);
-void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
-                       u32 id, int khz);
-void nv50_pm_clock_set(struct drm_device *, void *);
+int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv50_pm_clocks_set(struct drm_device *, void *);
+int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
+int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
 
 /* nva3_pm.c */
 int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
 void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-void nva3_pm_clocks_set(struct drm_device *, void *);
+int nva3_pm_clocks_set(struct drm_device *, void *);
 
 /* nvc0_pm.c */
 int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nvc0_pm_clocks_set(struct drm_device *, void *);
 
 /* nouveau_temp.c */
 void nouveau_temp_init(struct drm_device *dev);
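
The header changes above unify every generation behind the same three hooks: clocks_get() reads the current level from hardware, clocks_pre() turns a target level into an opaque, preallocated state object, and clocks_set() programs it (and now reports failure instead of returning void). A sketch of how a caller is presumably expected to drive that split; the ops struct and reclock() helper below are illustrative, not nouveau's actual code:

#include <linux/err.h>

struct drm_device;              /* opaque here */
struct nouveau_pm_level;        /* opaque here */

/* stand-in for the pm part of nouveau's engine table */
struct pm_clock_ops {
        int   (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
        void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
        int   (*clocks_set)(struct drm_device *, void *);
};

static int reclock(struct drm_device *dev, const struct pm_clock_ops *pm,
                   struct nouveau_pm_level *target)
{
        void *state;

        state = pm->clocks_pre(dev, target);    /* compute + allocate, may fail */
        if (IS_ERR(state))
                return PTR_ERR(state);

        return pm->clocks_set(dev, state);      /* program hw; frees the state */
}
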
index c8a463b..47f245e 100644 (file)
@@ -8,91 +8,30 @@
 #define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
 
 struct nouveau_sgdma_be {
-       struct ttm_backend backend;
+       /* this has to be the first field so that populate/unpopulate in
+        * nouveau_bo.c work properly; otherwise they would have to move here
+        */
+       struct ttm_dma_tt ttm;
        struct drm_device *dev;
-
-       dma_addr_t *pages;
-       unsigned nr_pages;
-       bool unmap_pages;
-
        u64 offset;
-       bool bound;
 };
 
-static int
-nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
-                      struct page **pages, struct page *dummy_read_page,
-                      dma_addr_t *dma_addrs)
-{
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-       struct drm_device *dev = nvbe->dev;
-       int i;
-
-       NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
-
-       nvbe->pages = dma_addrs;
-       nvbe->nr_pages = num_pages;
-       nvbe->unmap_pages = true;
-
-       /* this code path isn't called and is incorrect anyways */
-       if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
-               nvbe->unmap_pages = false;
-               return 0;
-       }
-
-       for (i = 0; i < num_pages; i++) {
-               nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
-                                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-               if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
-                       nvbe->nr_pages = --i;
-                       be->func->clear(be);
-                       return -EFAULT;
-               }
-       }
-
-       return 0;
-}
-
 static void
-nouveau_sgdma_clear(struct ttm_backend *be)
+nouveau_sgdma_destroy(struct ttm_tt *ttm)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-       struct drm_device *dev = nvbe->dev;
-
-       if (nvbe->bound)
-               be->func->unbind(be);
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 
-       if (nvbe->unmap_pages) {
-               while (nvbe->nr_pages--) {
-                       pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
-                                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-               }
-               nvbe->unmap_pages = false;
-       }
-
-       nvbe->pages = NULL;
-}
-
-static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
-{
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-
-       if (be) {
+       if (ttm) {
                NV_DEBUG(nvbe->dev, "\n");
-
-               if (nvbe) {
-                       if (nvbe->pages)
-                               be->func->clear(be);
-                       kfree(nvbe);
-               }
+               ttm_dma_tt_fini(&nvbe->ttm);
+               kfree(nvbe);
        }
 }
 
 static int
-nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -102,8 +41,8 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 
        nvbe->offset = mem->start << PAGE_SHIFT;
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-       for (i = 0; i < nvbe->nr_pages; i++) {
-               dma_addr_t dma_offset = nvbe->pages[i];
+       for (i = 0; i < ttm->num_pages; i++) {
+               dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
                uint32_t offset_l = lower_32_bits(dma_offset);
 
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -112,14 +51,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
                }
        }
 
-       nvbe->bound = true;
        return 0;
 }
 
 static int
-nv04_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -127,22 +65,19 @@ nv04_sgdma_unbind(struct ttm_backend *be)
 
        NV_DEBUG(dev, "\n");
 
-       if (!nvbe->bound)
+       if (ttm->state != tt_bound)
                return 0;
 
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-       for (i = 0; i < nvbe->nr_pages; i++) {
+       for (i = 0; i < ttm->num_pages; i++) {
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
                        nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
        }
 
-       nvbe->bound = false;
        return 0;
 }
 
 static struct ttm_backend_func nv04_sgdma_backend = {
-       .populate               = nouveau_sgdma_populate,
-       .clear                  = nouveau_sgdma_clear,
        .bind                   = nv04_sgdma_bind,
        .unbind                 = nv04_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
@@ -161,14 +96,14 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
 }
 
 static int
-nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-       dma_addr_t *list = nvbe->pages;
+       dma_addr_t *list = nvbe->ttm.dma_address;
        u32 pte = mem->start << 2;
-       u32 cnt = nvbe->nr_pages;
+       u32 cnt = ttm->num_pages;
 
        nvbe->offset = mem->start << PAGE_SHIFT;
 
@@ -178,18 +113,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
        }
 
        nv41_sgdma_flush(nvbe);
-       nvbe->bound = true;
        return 0;
 }
 
 static int
-nv41_sgdma_unbind(struct ttm_backend *be)
+nv41_sgdma_unbind(struct ttm_tt *ttm)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
-       u32 cnt = nvbe->nr_pages;
+       u32 cnt = ttm->num_pages;
 
        while (cnt--) {
                nv_wo32(pgt, pte, 0x00000000);
@@ -197,24 +131,22 @@ nv41_sgdma_unbind(struct ttm_backend *be)
        }
 
        nv41_sgdma_flush(nvbe);
-       nvbe->bound = false;
        return 0;
 }
 
 static struct ttm_backend_func nv41_sgdma_backend = {
-       .populate               = nouveau_sgdma_populate,
-       .clear                  = nouveau_sgdma_clear,
        .bind                   = nv41_sgdma_bind,
        .unbind                 = nv41_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
 };
 
 static void
-nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+nv44_sgdma_flush(struct ttm_tt *ttm)
 {
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_device *dev = nvbe->dev;
 
-       nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+       nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
        nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
        if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
                NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -273,14 +205,14 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
 }
 
 static int
-nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-       dma_addr_t *list = nvbe->pages;
+       dma_addr_t *list = nvbe->ttm.dma_address;
        u32 pte = mem->start << 2, tmp[4];
-       u32 cnt = nvbe->nr_pages;
+       u32 cnt = ttm->num_pages;
        int i;
 
        nvbe->offset = mem->start << PAGE_SHIFT;
@@ -308,19 +240,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
        if (cnt)
                nv44_sgdma_fill(pgt, list, pte, cnt);
 
-       nv44_sgdma_flush(nvbe);
-       nvbe->bound = true;
+       nv44_sgdma_flush(ttm);
        return 0;
 }
 
 static int
-nv44_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_tt *ttm)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
-       u32 cnt = nvbe->nr_pages;
+       u32 cnt = ttm->num_pages;
 
        if (pte & 0x0000000c) {
                u32  max = 4 - ((pte >> 2) & 0x3);
@@ -342,55 +273,47 @@ nv44_sgdma_unbind(struct ttm_backend *be)
        if (cnt)
                nv44_sgdma_fill(pgt, NULL, pte, cnt);
 
-       nv44_sgdma_flush(nvbe);
-       nvbe->bound = false;
+       nv44_sgdma_flush(ttm);
        return 0;
 }
 
 static struct ttm_backend_func nv44_sgdma_backend = {
-       .populate               = nouveau_sgdma_populate,
-       .clear                  = nouveau_sgdma_clear,
        .bind                   = nv44_sgdma_bind,
        .unbind                 = nv44_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
 };
 
 static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nouveau_mem *node = mem->mm_node;
+
        /* noop: bound in move_notify() */
-       node->pages = nvbe->pages;
-       nvbe->pages = (dma_addr_t *)node;
-       nvbe->bound = true;
+       node->pages = nvbe->ttm.dma_address;
        return 0;
 }
 
 static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv50_sgdma_unbind(struct ttm_tt *ttm)
 {
-       struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-       struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
        /* noop: unbound in move_notify() */
-       nvbe->pages = node->pages;
-       node->pages = NULL;
-       nvbe->bound = false;
        return 0;
 }
 
 static struct ttm_backend_func nv50_sgdma_backend = {
-       .populate               = nouveau_sgdma_populate,
-       .clear                  = nouveau_sgdma_clear,
        .bind                   = nv50_sgdma_bind,
        .unbind                 = nv50_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
 };
 
-struct ttm_backend *
-nouveau_sgdma_init_ttm(struct drm_device *dev)
+struct ttm_tt *
+nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+                        unsigned long size, uint32_t page_flags,
+                        struct page *dummy_read_page)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+       struct drm_device *dev = dev_priv->dev;
        struct nouveau_sgdma_be *nvbe;
 
        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -398,9 +321,13 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
                return NULL;
 
        nvbe->dev = dev;
+       nvbe->ttm.ttm.func = dev_priv->gart_info.func;
 
-       nvbe->backend.func = dev_priv->gart_info.func;
-       return &nvbe->backend;
+       if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+               kfree(nvbe);
+               return NULL;
+       }
+       return &nvbe->ttm.ttm;
 }
 
 int
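
The casts in nouveau_sgdma_destroy() and the bind/unbind hooks only work because ttm_dma_tt is the first member of nouveau_sgdma_be, as the struct comment warns. A stripped-down illustration of that layout rule, with stand-in types rather than the real TTM structs:

/* base_tt stands in for struct ttm_tt / ttm_dma_tt; my_backend mirrors the
 * shape of the new nouveau_sgdma_be */
struct base_tt {
        unsigned long num_pages;
};

struct my_backend {
        struct base_tt ttm;             /* must stay the first member */
        void *dev;
        unsigned long long offset;
};

/* TTM hands back a struct base_tt *; because ttm sits at offset 0, the
 * cast below is well-defined C.  container_of() would tolerate the member
 * moving, which is exactly the alternative the patch comment alludes to. */
static struct my_backend *to_backend(struct base_tt *t)
{
        return (struct my_backend *)t;
}
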
index d8831ab..f5e9891 100644 (file)
@@ -36,6 +36,7 @@
 #include "nouveau_drm.h"
 #include "nouveau_fbcon.h"
 #include "nouveau_ramht.h"
+#include "nouveau_gpio.h"
 #include "nouveau_pm.h"
 #include "nv50_display.h"
 
@@ -80,16 +81,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv04_display_early_init;
                engine->display.late_takedown   = nv04_display_late_takedown;
                engine->display.create          = nv04_display_create;
-               engine->display.init            = nv04_display_init;
                engine->display.destroy         = nv04_display_destroy;
-               engine->gpio.init               = nouveau_stub_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = NULL;
-               engine->gpio.set                = NULL;
-               engine->gpio.irq_enable         = NULL;
-               engine->pm.clock_get            = nv04_pm_clock_get;
-               engine->pm.clock_pre            = nv04_pm_clock_pre;
-               engine->pm.clock_set            = nv04_pm_clock_set;
+               engine->display.init            = nv04_display_init;
+               engine->display.fini            = nv04_display_fini;
+               engine->pm.clocks_get           = nv04_pm_clocks_get;
+               engine->pm.clocks_pre           = nv04_pm_clocks_pre;
+               engine->pm.clocks_set           = nv04_pm_clocks_set;
                engine->vram.init               = nouveau_mem_detect;
                engine->vram.takedown           = nouveau_stub_takedown;
                engine->vram.flags_valid        = nouveau_mem_flags_valid;
@@ -129,16 +126,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv04_display_early_init;
                engine->display.late_takedown   = nv04_display_late_takedown;
                engine->display.create          = nv04_display_create;
-               engine->display.init            = nv04_display_init;
                engine->display.destroy         = nv04_display_destroy;
-               engine->gpio.init               = nouveau_stub_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = nv10_gpio_get;
-               engine->gpio.set                = nv10_gpio_set;
-               engine->gpio.irq_enable         = NULL;
-               engine->pm.clock_get            = nv04_pm_clock_get;
-               engine->pm.clock_pre            = nv04_pm_clock_pre;
-               engine->pm.clock_set            = nv04_pm_clock_set;
+               engine->display.init            = nv04_display_init;
+               engine->display.fini            = nv04_display_fini;
+               engine->gpio.drive              = nv10_gpio_drive;
+               engine->gpio.sense              = nv10_gpio_sense;
+               engine->pm.clocks_get           = nv04_pm_clocks_get;
+               engine->pm.clocks_pre           = nv04_pm_clocks_pre;
+               engine->pm.clocks_set           = nv04_pm_clocks_set;
                engine->vram.init               = nouveau_mem_detect;
                engine->vram.takedown           = nouveau_stub_takedown;
                engine->vram.flags_valid        = nouveau_mem_flags_valid;
@@ -178,16 +173,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv04_display_early_init;
                engine->display.late_takedown   = nv04_display_late_takedown;
                engine->display.create          = nv04_display_create;
-               engine->display.init            = nv04_display_init;
                engine->display.destroy         = nv04_display_destroy;
-               engine->gpio.init               = nouveau_stub_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = nv10_gpio_get;
-               engine->gpio.set                = nv10_gpio_set;
-               engine->gpio.irq_enable         = NULL;
-               engine->pm.clock_get            = nv04_pm_clock_get;
-               engine->pm.clock_pre            = nv04_pm_clock_pre;
-               engine->pm.clock_set            = nv04_pm_clock_set;
+               engine->display.init            = nv04_display_init;
+               engine->display.fini            = nv04_display_fini;
+               engine->gpio.drive              = nv10_gpio_drive;
+               engine->gpio.sense              = nv10_gpio_sense;
+               engine->pm.clocks_get           = nv04_pm_clocks_get;
+               engine->pm.clocks_pre           = nv04_pm_clocks_pre;
+               engine->pm.clocks_set           = nv04_pm_clocks_set;
                engine->vram.init               = nouveau_mem_detect;
                engine->vram.takedown           = nouveau_stub_takedown;
                engine->vram.flags_valid        = nouveau_mem_flags_valid;
@@ -227,16 +220,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv04_display_early_init;
                engine->display.late_takedown   = nv04_display_late_takedown;
                engine->display.create          = nv04_display_create;
-               engine->display.init            = nv04_display_init;
                engine->display.destroy         = nv04_display_destroy;
-               engine->gpio.init               = nouveau_stub_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = nv10_gpio_get;
-               engine->gpio.set                = nv10_gpio_set;
-               engine->gpio.irq_enable         = NULL;
-               engine->pm.clock_get            = nv04_pm_clock_get;
-               engine->pm.clock_pre            = nv04_pm_clock_pre;
-               engine->pm.clock_set            = nv04_pm_clock_set;
+               engine->display.init            = nv04_display_init;
+               engine->display.fini            = nv04_display_fini;
+               engine->gpio.drive              = nv10_gpio_drive;
+               engine->gpio.sense              = nv10_gpio_sense;
+               engine->pm.clocks_get           = nv04_pm_clocks_get;
+               engine->pm.clocks_pre           = nv04_pm_clocks_pre;
+               engine->pm.clocks_set           = nv04_pm_clocks_set;
                engine->pm.voltage_get          = nouveau_voltage_gpio_get;
                engine->pm.voltage_set          = nouveau_voltage_gpio_set;
                engine->vram.init               = nouveau_mem_detect;
@@ -279,19 +270,22 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv04_display_early_init;
                engine->display.late_takedown   = nv04_display_late_takedown;
                engine->display.create          = nv04_display_create;
-               engine->display.init            = nv04_display_init;
                engine->display.destroy         = nv04_display_destroy;
-               engine->gpio.init               = nouveau_stub_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = nv10_gpio_get;
-               engine->gpio.set                = nv10_gpio_set;
-               engine->gpio.irq_enable         = NULL;
+               engine->display.init            = nv04_display_init;
+               engine->display.fini            = nv04_display_fini;
+               engine->gpio.init               = nv10_gpio_init;
+               engine->gpio.fini               = nv10_gpio_fini;
+               engine->gpio.drive              = nv10_gpio_drive;
+               engine->gpio.sense              = nv10_gpio_sense;
+               engine->gpio.irq_enable         = nv10_gpio_irq_enable;
                engine->pm.clocks_get           = nv40_pm_clocks_get;
                engine->pm.clocks_pre           = nv40_pm_clocks_pre;
                engine->pm.clocks_set           = nv40_pm_clocks_set;
                engine->pm.voltage_get          = nouveau_voltage_gpio_get;
                engine->pm.voltage_set          = nouveau_voltage_gpio_set;
                engine->pm.temp_get             = nv40_temp_get;
+               engine->pm.pwm_get              = nv40_pm_pwm_get;
+               engine->pm.pwm_set              = nv40_pm_pwm_set;
                engine->vram.init               = nouveau_mem_detect;
                engine->vram.takedown           = nouveau_stub_takedown;
                engine->vram.flags_valid        = nouveau_mem_flags_valid;
@@ -334,14 +328,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv50_display_early_init;
                engine->display.late_takedown   = nv50_display_late_takedown;
                engine->display.create          = nv50_display_create;
-               engine->display.init            = nv50_display_init;
                engine->display.destroy         = nv50_display_destroy;
+               engine->display.init            = nv50_display_init;
+               engine->display.fini            = nv50_display_fini;
                engine->gpio.init               = nv50_gpio_init;
-               engine->gpio.takedown           = nv50_gpio_fini;
-               engine->gpio.get                = nv50_gpio_get;
-               engine->gpio.set                = nv50_gpio_set;
-               engine->gpio.irq_register       = nv50_gpio_irq_register;
-               engine->gpio.irq_unregister     = nv50_gpio_irq_unregister;
+               engine->gpio.fini               = nv50_gpio_fini;
+               engine->gpio.drive              = nv50_gpio_drive;
+               engine->gpio.sense              = nv50_gpio_sense;
                engine->gpio.irq_enable         = nv50_gpio_irq_enable;
                switch (dev_priv->chipset) {
                case 0x84:
@@ -354,9 +347,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                case 0xaa:
                case 0xac:
                case 0x50:
-                       engine->pm.clock_get    = nv50_pm_clock_get;
-                       engine->pm.clock_pre    = nv50_pm_clock_pre;
-                       engine->pm.clock_set    = nv50_pm_clock_set;
+                       engine->pm.clocks_get   = nv50_pm_clocks_get;
+                       engine->pm.clocks_pre   = nv50_pm_clocks_pre;
+                       engine->pm.clocks_set   = nv50_pm_clocks_set;
                        break;
                default:
                        engine->pm.clocks_get   = nva3_pm_clocks_get;
@@ -370,6 +363,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                        engine->pm.temp_get     = nv84_temp_get;
                else
                        engine->pm.temp_get     = nv40_temp_get;
+               engine->pm.pwm_get              = nv50_pm_pwm_get;
+               engine->pm.pwm_set              = nv50_pm_pwm_set;
                engine->vram.init               = nv50_vram_init;
                engine->vram.takedown           = nv50_vram_fini;
                engine->vram.get                = nv50_vram_new;
@@ -407,14 +402,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nv50_display_early_init;
                engine->display.late_takedown   = nv50_display_late_takedown;
                engine->display.create          = nv50_display_create;
-               engine->display.init            = nv50_display_init;
                engine->display.destroy         = nv50_display_destroy;
+               engine->display.init            = nv50_display_init;
+               engine->display.fini            = nv50_display_fini;
                engine->gpio.init               = nv50_gpio_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = nv50_gpio_get;
-               engine->gpio.set                = nv50_gpio_set;
-               engine->gpio.irq_register       = nv50_gpio_irq_register;
-               engine->gpio.irq_unregister     = nv50_gpio_irq_unregister;
+               engine->gpio.fini               = nv50_gpio_fini;
+               engine->gpio.drive              = nv50_gpio_drive;
+               engine->gpio.sense              = nv50_gpio_sense;
                engine->gpio.irq_enable         = nv50_gpio_irq_enable;
                engine->vram.init               = nvc0_vram_init;
                engine->vram.takedown           = nv50_vram_fini;
@@ -423,8 +417,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->vram.flags_valid        = nvc0_vram_flags_valid;
                engine->pm.temp_get             = nv84_temp_get;
                engine->pm.clocks_get           = nvc0_pm_clocks_get;
+               engine->pm.clocks_pre           = nvc0_pm_clocks_pre;
+               engine->pm.clocks_set           = nvc0_pm_clocks_set;
                engine->pm.voltage_get          = nouveau_voltage_gpio_get;
                engine->pm.voltage_set          = nouveau_voltage_gpio_set;
+               engine->pm.pwm_get              = nv50_pm_pwm_get;
+               engine->pm.pwm_set              = nv50_pm_pwm_set;
                break;
        case 0xd0:
                engine->instmem.init            = nvc0_instmem_init;
@@ -457,21 +455,23 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->display.early_init      = nouveau_stub_init;
                engine->display.late_takedown   = nouveau_stub_takedown;
                engine->display.create          = nvd0_display_create;
-               engine->display.init            = nvd0_display_init;
                engine->display.destroy         = nvd0_display_destroy;
+               engine->display.init            = nvd0_display_init;
+               engine->display.fini            = nvd0_display_fini;
                engine->gpio.init               = nv50_gpio_init;
-               engine->gpio.takedown           = nouveau_stub_takedown;
-               engine->gpio.get                = nvd0_gpio_get;
-               engine->gpio.set                = nvd0_gpio_set;
-               engine->gpio.irq_register       = nv50_gpio_irq_register;
-               engine->gpio.irq_unregister     = nv50_gpio_irq_unregister;
+               engine->gpio.fini               = nv50_gpio_fini;
+               engine->gpio.drive              = nvd0_gpio_drive;
+               engine->gpio.sense              = nvd0_gpio_sense;
                engine->gpio.irq_enable         = nv50_gpio_irq_enable;
                engine->vram.init               = nvc0_vram_init;
                engine->vram.takedown           = nv50_vram_fini;
                engine->vram.get                = nvc0_vram_new;
                engine->vram.put                = nv50_vram_del;
                engine->vram.flags_valid        = nvc0_vram_flags_valid;
+               engine->pm.temp_get             = nv84_temp_get;
                engine->pm.clocks_get           = nvc0_pm_clocks_get;
+               engine->pm.clocks_pre           = nvc0_pm_clocks_pre;
+               engine->pm.clocks_set           = nvc0_pm_clocks_set;
                engine->pm.voltage_get          = nouveau_voltage_gpio_get;
                engine->pm.voltage_set          = nouveau_voltage_gpio_set;
                break;
@@ -615,7 +615,7 @@ nouveau_card_init(struct drm_device *dev)
                goto out_gart;
 
        /* PGPIO */
-       ret = engine->gpio.init(dev);
+       ret = nouveau_gpio_create(dev);
        if (ret)
                goto out_mc;
 
@@ -648,6 +648,7 @@ nouveau_card_init(struct drm_device *dev)
                        nv50_graph_create(dev);
                        break;
                case NV_C0:
+               case NV_D0:
                        nvc0_graph_create(dev);
                        break;
                default:
@@ -663,6 +664,11 @@ nouveau_card_init(struct drm_device *dev)
                case 0xa0:
                        nv84_crypt_create(dev);
                        break;
+               case 0x98:
+               case 0xaa:
+               case 0xac:
+                       nv98_crypt_create(dev);
+                       break;
                }
 
                switch (dev_priv->card_type) {
@@ -684,15 +690,25 @@ nouveau_card_init(struct drm_device *dev)
                        break;
                }
 
+               if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
+                       nv84_bsp_create(dev);
+                       nv84_vp_create(dev);
+                       nv98_ppp_create(dev);
+               } else
+               if (dev_priv->chipset >= 0x84) {
+                       nv50_mpeg_create(dev);
+                       nv84_bsp_create(dev);
+                       nv84_vp_create(dev);
+               } else
+               if (dev_priv->chipset >= 0x50) {
+                       nv50_mpeg_create(dev);
+               } else
                if (dev_priv->card_type == NV_40 ||
                    dev_priv->chipset == 0x31 ||
                    dev_priv->chipset == 0x34 ||
-                   dev_priv->chipset == 0x36)
+                   dev_priv->chipset == 0x36) {
                        nv31_mpeg_create(dev);
-               else
-               if (dev_priv->card_type == NV_50 &&
-                   (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
-                       nv50_mpeg_create(dev);
+               }
 
                for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
                        if (dev_priv->eng[e]) {
@@ -712,27 +728,7 @@ nouveau_card_init(struct drm_device *dev)
        if (ret)
                goto out_fifo;
 
-       /* initialise general modesetting */
-       drm_mode_config_init(dev);
-       drm_mode_create_scaling_mode_property(dev);
-       drm_mode_create_dithering_property(dev);
-       dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
-       dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
-       dev->mode_config.min_width = 0;
-       dev->mode_config.min_height = 0;
-       if (dev_priv->card_type < NV_10) {
-               dev->mode_config.max_width = 2048;
-               dev->mode_config.max_height = 2048;
-       } else
-       if (dev_priv->card_type < NV_50) {
-               dev->mode_config.max_width = 4096;
-               dev->mode_config.max_height = 4096;
-       } else {
-               dev->mode_config.max_width = 8192;
-               dev->mode_config.max_height = 8192;
-       }
-
-       ret = engine->display.create(dev);
+       ret = nouveau_display_create(dev);
        if (ret)
                goto out_irq;
 
@@ -752,12 +748,11 @@ nouveau_card_init(struct drm_device *dev)
        }
 
        if (dev->mode_config.num_crtc) {
-               ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+               ret = nouveau_display_init(dev);
                if (ret)
                        goto out_chan;
 
                nouveau_fbcon_init(dev);
-               drm_kms_helper_poll_init(dev);
        }
 
        return 0;
@@ -768,7 +763,7 @@ out_fence:
        nouveau_fence_fini(dev);
 out_disp:
        nouveau_backlight_exit(dev);
-       engine->display.destroy(dev);
+       nouveau_display_destroy(dev);
 out_irq:
        nouveau_irq_fini(dev);
 out_fifo:
@@ -788,7 +783,7 @@ out_engine:
 out_timer:
        engine->timer.takedown(dev);
 out_gpio:
-       engine->gpio.takedown(dev);
+       nouveau_gpio_destroy(dev);
 out_mc:
        engine->mc.takedown(dev);
 out_gart:
@@ -818,9 +813,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
        int e;
 
        if (dev->mode_config.num_crtc) {
-               drm_kms_helper_poll_fini(dev);
                nouveau_fbcon_fini(dev);
-               drm_vblank_cleanup(dev);
+               nouveau_display_fini(dev);
        }
 
        if (dev_priv->channel) {
@@ -829,8 +823,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
        }
 
        nouveau_backlight_exit(dev);
-       engine->display.destroy(dev);
-       drm_mode_config_cleanup(dev);
+       nouveau_display_destroy(dev);
 
        if (!dev_priv->noaccel) {
                engine->fifo.takedown(dev);
@@ -843,7 +836,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
        }
        engine->fb.takedown(dev);
        engine->timer.takedown(dev);
-       engine->gpio.takedown(dev);
+       nouveau_gpio_destroy(dev);
        engine->mc.takedown(dev);
        engine->display.late_takedown(dev);
 
@@ -1110,13 +1103,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
        dev_priv->noaccel = !!nouveau_noaccel;
        if (nouveau_noaccel == -1) {
                switch (dev_priv->chipset) {
-#if 0
-               case 0xXX: /* known broken */
+               case 0xd9: /* known broken */
                        NV_INFO(dev, "acceleration disabled by default, pass "
                                     "noaccel=0 to force enable\n");
                        dev_priv->noaccel = true;
                        break;
-#endif
                default:
                        dev_priv->noaccel = false;
                        break;
@@ -1238,7 +1229,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
                getparam->value = 1;
                break;
        case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
-               getparam->value = dev_priv->card_type < NV_D0;
+               getparam->value = 1;
                break;
        case NOUVEAU_GETPARAM_GRAPH_UNITS:
                /* NV40 and NV50 versions are quite different, but register
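
nouveau_init_engine_ptrs() keeps following the same pattern: one switch per chipset family fills a table of function pointers (display init/fini, gpio drive/sense, pm clocks, pwm), and the rest of the driver only ever calls through that table. A minimal sketch of the idea with stub implementations and a deliberately simplified selection rule; the real driver keys off many chipset ranges, not a single threshold:

struct drm_device;              /* opaque here */

/* illustrative subset of the engine table; the real struct has many more hooks */
struct gpio_ops {
        int (*drive)(struct drm_device *, int line, int dir, int out);
        int (*sense)(struct drm_device *, int line);
};

static int drive_pre_nv50(struct drm_device *dev, int line, int dir, int out) { return 0; }
static int sense_pre_nv50(struct drm_device *dev, int line)                   { return 0; }
static int drive_nv50(struct drm_device *dev, int line, int dir, int out)     { return 0; }
static int sense_nv50(struct drm_device *dev, int line)                       { return 0; }

/* chosen once at load time; everything else calls through the table */
static void pick_gpio_ops(int card_type, struct gpio_ops *ops)
{
        if (card_type < 0x50) {         /* NV10..NV40: CRTC GPIO banks */
                ops->drive = drive_pre_nv50;
                ops->sense = sense_pre_nv50;
        } else {                        /* NV50 and later: PGPIO block */
                ops->drive = drive_nv50;
                ops->sense = sense_nv50;
        }
}
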
index 5a46446..0f5a301 100644 (file)
@@ -55,6 +55,10 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
        temps->down_clock = 100;
        temps->fan_boost = 90;
 
+       /* Set the default range for the pwm fan */
+       pm->fan.min_duty = 30;
+       pm->fan.max_duty = 100;
+
        /* Set the known default values to setup the temperature sensor */
        if (dev_priv->card_type >= NV_40) {
                switch (dev_priv->chipset) {
@@ -156,11 +160,26 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
                case 0x13:
                        sensor->slope_div = value;
                        break;
+               case 0x22:
+                       pm->fan.min_duty = value & 0xff;
+                       pm->fan.max_duty = (value & 0xff00) >> 8;
+                       break;
+               case 0x26:
+                       pm->fan.pwm_freq = value;
+                       break;
                }
                temp += recordlen;
        }
 
        nouveau_temp_safety_checks(dev);
+
+       /* check the fan min/max settings */
+       if (pm->fan.min_duty < 10)
+               pm->fan.min_duty = 10;
+       if (pm->fan.max_duty > 100)
+               pm->fan.max_duty = 100;
+       if (pm->fan.max_duty < pm->fan.min_duty)
+               pm->fan.max_duty = pm->fan.min_duty;
 }
 
 static int
@@ -267,8 +286,6 @@ probe_monitoring_device(struct nouveau_i2c_chan *i2c,
 static void
 nouveau_temp_probe_i2c(struct drm_device *dev)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct dcb_table *dcb = &dev_priv->vbios.dcb;
        struct i2c_board_info info[] = {
                { I2C_BOARD_INFO("w83l785ts", 0x2d) },
                { I2C_BOARD_INFO("w83781d", 0x2d) },
@@ -277,11 +294,9 @@ nouveau_temp_probe_i2c(struct drm_device *dev)
                { I2C_BOARD_INFO("lm99", 0x4c) },
                { }
        };
-       int idx = (dcb->version >= 0x40 ?
-                  dcb->i2c_default_indices & 0xf : 2);
 
        nouveau_i2c_identify(dev, "monitoring device", info,
-                            probe_monitoring_device, idx);
+                            probe_monitoring_device, NV_I2C_DEFAULT(0));
 }
 
 void
@@ -297,9 +312,9 @@ nouveau_temp_init(struct drm_device *dev)
                        return;
 
                if (P.version == 1)
-                       temp = ROMPTR(bios, P.data[12]);
+                       temp = ROMPTR(dev, P.data[12]);
                else if (P.version == 2)
-                       temp = ROMPTR(bios, P.data[16]);
+                       temp = ROMPTR(dev, P.data[16]);
                else
                        NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
 
index ef0832b..2bf6c03 100644 (file)
@@ -78,9 +78,10 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
 
 void
 nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
-                 struct nouveau_mem *mem, dma_addr_t *list)
+                 struct nouveau_mem *mem)
 {
        struct nouveau_vm *vm = vma->vm;
+       dma_addr_t *list = mem->pages;
        int big = vma->node->type != vm->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
index 6ce995f..4fb6e72 100644 (file)
@@ -89,7 +89,7 @@ void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
 void nouveau_vm_unmap(struct nouveau_vma *);
 void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
 void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
-                      struct nouveau_mem *, dma_addr_t *);
+                      struct nouveau_mem *);
 
 /* nv50_vm.c */
 void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
index 86d03e1..b010cb9 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
+#include "nouveau_gpio.h"
 
 static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
 static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
@@ -34,7 +35,6 @@ int
 nouveau_voltage_gpio_get(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
        struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
        u8 vid = 0;
        int i;
@@ -43,7 +43,7 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
                if (!(volt->vid_mask & (1 << i)))
                        continue;
 
-               vid |= gpio->get(dev, vidtag[i]) << i;
+               vid |= nouveau_gpio_func_get(dev, vidtag[i]) << i;
        }
 
        return nouveau_volt_lvl_lookup(dev, vid);
@@ -53,7 +53,6 @@ int
 nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
        struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
        int vid, i;
 
@@ -65,7 +64,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
                if (!(volt->vid_mask & (1 << i)))
                        continue;
 
-               gpio->set(dev, vidtag[i], !!(vid & (1 << i)));
+               nouveau_gpio_func_set(dev, vidtag[i], !!(vid & (1 << i)));
        }
 
        return 0;
@@ -117,10 +116,10 @@ nouveau_volt_init(struct drm_device *dev)
                        return;
 
                if (P.version == 1)
-                       volt = ROMPTR(bios, P.data[16]);
+                       volt = ROMPTR(dev, P.data[16]);
                else
                if (P.version == 2)
-                       volt = ROMPTR(bios, P.data[12]);
+                       volt = ROMPTR(dev, P.data[12]);
                else {
                        NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
                }
@@ -130,7 +129,7 @@ nouveau_volt_init(struct drm_device *dev)
                        return;
                }
 
-               volt = ROMPTR(bios, bios->data[bios->offset + 0x98]);
+               volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
        }
 
        if (!volt) {
@@ -194,7 +193,7 @@ nouveau_volt_init(struct drm_device *dev)
                        return;
                }
 
-               if (!nouveau_bios_gpio_entry(dev, vidtag[i])) {
+               if (!nouveau_gpio_func_valid(dev, vidtag[i])) {
                        NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
                        return;
                }
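
nouveau_voltage_gpio_get() above now assembles the VID by sampling one GPIO function per bit, skipping bits that vid_mask says are not wired on this board. The loop in isolation, with a function-pointer stand-in for nouveau_gpio_func_get():

static int read_vid(int vid_mask, int nr_bits,
                    int (*gpio_get)(int tag), const int *tags)
{
        int vid = 0, i;

        for (i = 0; i < nr_bits; i++) {
                if (!(vid_mask & (1 << i)))
                        continue;               /* bit not routed on this board */
                vid |= gpio_get(tags[i]) << i;  /* sample the GPIO line */
        }
        return vid;                             /* looked up in the voltage table */
}
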
index 5e45398..728d075 100644 (file)
@@ -364,7 +364,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
        regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
        regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
        /* framebuffer can be larger than crtc scanout area. */
-       regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
+       regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitches[0] / 8;
        regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
        regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
        regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
@@ -377,9 +377,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
 
        /* framebuffer can be larger than crtc scanout area. */
        regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
-               XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+               XLATE(fb->pitches[0] / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
        regp->CRTC[NV_CIO_CRE_42] =
-               XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+               XLATE(fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
        regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
                                            MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
        regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
@@ -835,18 +835,18 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
        NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
                      regp->ramdac_gen_ctrl);
 
-       regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
+       regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitches[0] >> 3;
        regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
-               XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+               XLATE(drm_fb->pitches[0] >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
        regp->CRTC[NV_CIO_CRE_42] =
-               XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+               XLATE(drm_fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
        crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
        crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
        crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
 
        /* Update the framebuffer location. */
        regp->fb_start = nv_crtc->fb.offset & ~3;
-       regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
+       regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8);
        nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
 
        /* Update the arbitration parameters. */
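
All of the nv04 CRTC code switches from fb->pitch to fb->pitches[0], following drm_framebuffer's move to per-plane pitches; for the single-plane formats scanned out here only plane 0 matters. The scanout-offset arithmetic from nv04_crtc_do_mode_set_base(), as a standalone helper with a stand-in framebuffer struct:

struct fb_sketch {
        unsigned int pitches[4];        /* bytes per scanline, per plane */
        unsigned int bits_per_pixel;
};

static unsigned int scanout_offset(const struct fb_sketch *fb,
                                   unsigned int base, int x, int y)
{
        /* base is aligned down to 4 bytes, as regp->fb_start is above */
        return (base & ~3u) + y * fb->pitches[0] + x * fb->bits_per_pixel / 8;
}
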
index e000455..8300266 100644 (file)
@@ -32,6 +32,7 @@
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
 #include "nouveau_hw.h"
+#include "nouveau_gpio.h"
 #include "nvreg.h"
 
 int nv04_dac_output_offset(struct drm_encoder *encoder)
@@ -220,7 +221,6 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
        struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
        uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
        uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -252,11 +252,11 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
                nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
        }
 
-       saved_gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
-       saved_gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
+       saved_gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
+       saved_gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
 
-       gpio->set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
-       gpio->set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
 
        msleep(4);
 
@@ -306,8 +306,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
                nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
        nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
 
-       gpio->set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
-       gpio->set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
 
        return sample;
 }
index 12098bf..2258746 100644 (file)
@@ -289,6 +289,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
        struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct drm_display_mode *output_mode = &nv_encoder->mode;
+       struct drm_connector *connector = &nv_connector->base;
        uint32_t mode_ratio, panel_ratio;
 
        NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
@@ -340,10 +341,15 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
            output_mode->clock > 165000)
                regp->fp_control |= (2 << 24);
        if (nv_encoder->dcb->type == OUTPUT_LVDS) {
-               bool duallink, dummy;
+               bool duallink = false, dummy;
+               if (nv_connector->edid &&
+                   nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+                       duallink = (((u8 *)nv_connector->edid)[121] == 2);
+               } else {
+                       nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+                                                     &duallink, &dummy);
+               }
 
-               nouveau_bios_parse_lvds_table(dev, output_mode->clock,
-                                             &duallink, &dummy);
                if (duallink)
                        regp->fp_control |= (8 << 28);
        } else
@@ -407,7 +413,9 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
        }
 
        /* Output property. */
-       if (nv_connector->use_dithering) {
+       if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
+           (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
+            encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
                if (dev_priv->chipset == 0x11)
                        regp->dither = savep->dither | 0x00010000;
                else {
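
The dithering decision now honours the connector's dithering_mode property: always on, always off, or automatic, where dithering kicks in only when the framebuffer carries more colour depth than the panel can show (bits per component times three channels). A compact sketch of that test; the enum and parameter names are local stand-ins for the connector fields:

enum dither_mode { DITHER_OFF, DITHER_ON, DITHER_AUTO };

static int should_dither(enum dither_mode mode, int fb_depth, int panel_bpc)
{
        if (mode == DITHER_ON)
                return 1;
        if (mode == DITHER_AUTO)                /* e.g. 24 > 6 * 3 on a 6bpc panel */
                return fb_depth > panel_bpc * 3;
        return 0;
}
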
index 6bd8518..7047d37 100644 (file)
@@ -243,6 +243,11 @@ nv04_display_init(struct drm_device *dev)
        return 0;
 }
 
+void
+nv04_display_fini(struct drm_device *dev)
+{
+}
+
 static void
 nv04_vblank_crtc0_isr(struct drm_device *dev)
 {
index 9ae92a8..6e75899 100644 (file)
 #include "nouveau_hw.h"
 #include "nouveau_pm.h"
 
-struct nv04_pm_state {
+int
+nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+       int ret;
+
+       ret = nouveau_hw_get_clock(dev, PLL_CORE);
+       if (ret < 0)
+               return ret;
+       perflvl->core = ret;
+
+       ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
+       if (ret < 0)
+               return ret;
+       perflvl->memory = ret;
+
+       return 0;
+}
+
+struct nv04_pm_clock {
        struct pll_lims pll;
        struct nouveau_pll_vals calc;
 };
 
-int
-nv04_pm_clock_get(struct drm_device *dev, u32 id)
+struct nv04_pm_state {
+       struct nv04_pm_clock core;
+       struct nv04_pm_clock memory;
+};
+
+static int
+calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
 {
-       return nouveau_hw_get_clock(dev, id);
+       int ret;
+
+       ret = get_pll_limits(dev, id, &clk->pll);
+       if (ret)
+               return ret;
+
+       ret = nouveau_calc_pll_mnp(dev, &clk->pll, khz, &clk->calc);
+       if (!ret)
+               return -EINVAL;
+
+       return 0;
 }
 
 void *
-nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
-                 u32 id, int khz)
+nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-       struct nv04_pm_state *state;
+       struct nv04_pm_state *info;
        int ret;
 
-       state = kzalloc(sizeof(*state), GFP_KERNEL);
-       if (!state)
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
                return ERR_PTR(-ENOMEM);
 
-       ret = get_pll_limits(dev, id, &state->pll);
-       if (ret) {
-               kfree(state);
-               return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
-       }
+       ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
+       if (ret)
+               goto error;
 
-       ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc);
-       if (!ret) {
-               kfree(state);
-               return ERR_PTR(-EINVAL);
+       if (perflvl->memory) {
+               ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
+               if (ret)
+                       goto error;
        }
 
-       return state;
+       return info;
+error:
+       kfree(info);
+       return ERR_PTR(ret);
 }
 
-void
-nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
+static void
+prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
-       struct nv04_pm_state *state = pre_state;
-       u32 reg = state->pll.reg;
+       u32 reg = clk->pll.reg;
 
        /* thank the insane nouveau_hw_setpll() interface for this */
        if (dev_priv->card_type >= NV_40)
                reg += 4;
 
-       nouveau_hw_setpll(dev, reg, &state->calc);
+       nouveau_hw_setpll(dev, reg, &clk->calc);
+}
+
+int
+nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+       struct nv04_pm_state *state = pre_state;
+
+       prog_pll(dev, &state->core);
 
-       if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) {
-               if (dev_priv->card_type == NV_20)
-                       nv_mask(dev, 0x1002c4, 0, 1 << 20);
+       if (state->memory.pll.reg) {
+               prog_pll(dev, &state->memory);
+               if (dev_priv->card_type < NV_30) {
+                       if (dev_priv->card_type == NV_20)
+                               nv_mask(dev, 0x1002c4, 0, 1 << 20);
 
-               /* Reset the DLLs */
-               nv_mask(dev, 0x1002c0, 0, 1 << 8);
+                       /* Reset the DLLs */
+                       nv_mask(dev, 0x1002c0, 0, 1 << 8);
+               }
        }
 
-       if (reg == NV_PRAMDAC_NVPLL_COEFF)
-               ptimer->init(dev);
+       ptimer->init(dev);
 
        kfree(state);
+       return 0;
 }
-
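
nv04's reclocking is reshaped so that everything that can fail (PLL limit lookup, M/N/P calculation, allocation) happens in clocks_pre(), while clocks_set() only writes registers and frees the state it was handed. A compressed sketch of that lifecycle with the PLL maths stubbed out; the struct and function names here are illustrative, not the driver's:

#include <linux/slab.h>
#include <linux/err.h>

struct nv04_reclock_sketch {
        int core_khz;
        int memory_khz;
};

static void *clocks_pre_sketch(int core_khz, int memory_khz)
{
        struct nv04_reclock_sketch *info;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return ERR_PTR(-ENOMEM);
        info->core_khz = core_khz;      /* real code runs calc_pll(PLL_CORE) here */
        info->memory_khz = memory_khz;  /* and calc_pll(PLL_MEMORY) if non-zero */
        return info;
}

static int clocks_set_sketch(void *pre_state)
{
        struct nv04_reclock_sketch *info = pre_state;

        /* real code: prog_pll() for core and, if requested, memory + DLL reset */
        kfree(info);                    /* _set() always consumes the state */
        return 0;
}
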
index 263301b..55c9452 100644 (file)
@@ -2,6 +2,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
+#include "nouveau_hw.h"
 
 int
 nv04_timer_init(struct drm_device *dev)
@@ -17,7 +18,7 @@ nv04_timer_init(struct drm_device *dev)
 
        /* determine base clock for timer source */
        if (dev_priv->chipset < 0x40) {
-               n = dev_priv->engine.pm.clock_get(dev, PLL_CORE);
+               n = nouveau_hw_get_clock(dev, PLL_CORE);
        } else
        if (dev_priv->chipset == 0x40) {
                /*XXX: figure this out */
index 007fc29..550ad3f 100644 (file)
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_hw.h"
+#include "nouveau_gpio.h"
 
-static bool
-get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
-                 uint32_t *mask)
+int
+nv10_gpio_sense(struct drm_device *dev, int line)
 {
-       if (ent->line < 2) {
-               *reg = NV_PCRTC_GPIO;
-               *shift = ent->line * 16;
-               *mask = 0x11;
-
-       } else if (ent->line < 10) {
-               *reg = NV_PCRTC_GPIO_EXT;
-               *shift = (ent->line - 2) * 4;
-               *mask = 0x3;
+       if (line < 2) {
+               line = line * 16;
+               line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
+               return !!(line & 0x0100);
+       } else
+       if (line < 10) {
+               line = (line - 2) * 4;
+               line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
+               return !!(line & 0x04);
+       } else
+       if (line < 14) {
+               line = (line - 10) * 4;
+               line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
+               return !!(line & 0x04);
+       }
 
-       } else if (ent->line < 14) {
-               *reg = NV_PCRTC_850;
-               *shift = (ent->line - 10) * 4;
-               *mask = 0x3;
+       return -EINVAL;
+}
 
+int
+nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
+{
+       u32 reg, mask, data;
+
+       if (line < 2) {
+               line = line * 16;
+               reg  = NV_PCRTC_GPIO;
+               mask = 0x00000011;
+               data = (dir << 4) | out;
+       } else
+       if (line < 10) {
+               line = (line - 2) * 4;
+               reg  = NV_PCRTC_GPIO_EXT;
+               mask = 0x00000003 << ((line - 2) * 4);
+               data = (dir << 1) | out;
+       } else
+       if (line < 14) {
+               line = (line - 10) * 4;
+               reg  = NV_PCRTC_850;
+               mask = 0x00000003;
+               data = (dir << 1) | out;
        } else {
-               return false;
+               return -EINVAL;
        }
 
-       return true;
+       mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
+       NVWriteCRTC(dev, 0, reg, mask | (data << line));
+       return 0;
 }
 
-int
-nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+void
+nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
 {
-       struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
-       uint32_t reg, shift, mask, value;
+       u32 mask = 0x00010001 << line;
 
-       if (!ent)
-               return -ENODEV;
+       nv_wr32(dev, 0x001104, mask);
+       nv_mask(dev, 0x001144, mask, on ? mask : 0);
+}
 
-       if (!get_gpio_location(ent, &reg, &shift, &mask))
-               return -ENODEV;
+static void
+nv10_gpio_isr(struct drm_device *dev)
+{
+       u32 intr = nv_rd32(dev, 0x1104);
+       u32 hi = (intr & 0x0000ffff) >> 0;
+       u32 lo = (intr & 0xffff0000) >> 16;
 
-       value = NVReadCRTC(dev, 0, reg) >> shift;
+       nouveau_gpio_isr(dev, 0, hi | lo);
 
-       return (ent->invert ? 1 : 0) ^ (value & 1);
+       nv_wr32(dev, 0x001104, intr);
 }
 
 int
-nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nv10_gpio_init(struct drm_device *dev)
 {
-       struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
-       uint32_t reg, shift, mask, value;
-
-       if (!ent)
-               return -ENODEV;
-
-       if (!get_gpio_location(ent, &reg, &shift, &mask))
-               return -ENODEV;
-
-       value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
-       mask = ~(mask << shift);
-
-       NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
-
+       nv_wr32(dev, 0x001140, 0x00000000);
+       nv_wr32(dev, 0x001100, 0xffffffff);
+       nv_wr32(dev, 0x001144, 0x00000000);
+       nv_wr32(dev, 0x001104, 0xffffffff);
+       nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
        return 0;
 }
+
+void
+nv10_gpio_fini(struct drm_device *dev)
+{
+       nv_wr32(dev, 0x001140, 0x00000000);
+       nv_wr32(dev, 0x001144, 0x00000000);
+       nouveau_irq_unregister(dev, 28);
+}
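
The nv10_gpio_sense() and nv10_gpio_drive() helpers added above share one line-to-register mapping: lines 0-1 live in NV_PCRTC_GPIO at a 16-bit stride, lines 2-9 in NV_PCRTC_GPIO_EXT and lines 10-13 in NV_PCRTC_850 at a 4-bit stride, with anything else rejected as -EINVAL. A minimal user-space sketch of that mapping (the register names are printed as plain strings rather than real MMIO offsets, and nv10_gpio_location() is a made-up helper name):

#include <stdio.h>

/* return which CRTC register a GPIO line lives in, and the bit shift
 * within it, mirroring the branches of the helpers above */
static const char *nv10_gpio_location(int line, int *shift)
{
	if (line < 2) {
		*shift = line * 16;		/* 16 bits per line */
		return "NV_PCRTC_GPIO";
	} else if (line < 10) {
		*shift = (line - 2) * 4;	/* 4 bits per line */
		return "NV_PCRTC_GPIO_EXT";
	} else if (line < 14) {
		*shift = (line - 10) * 4;
		return "NV_PCRTC_850";
	}
	return NULL;				/* the driver returns -EINVAL */
}

int main(void)
{
	for (int line = 0; line < 14; line++) {
		int shift;
		const char *reg = nv10_gpio_location(line, &shift);
		printf("gpio %2d -> %s >> %d\n", line, reg, shift);
	}
	return 0;
}
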
index 3900ceb..696d7e7 100644 (file)
@@ -30,6 +30,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
+#include "nouveau_gpio.h"
 #include "nouveau_hw.h"
 #include "nv17_tv.h"
 
@@ -37,7 +38,6 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
        uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
        uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
                fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -53,8 +53,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
        head = (dacclk & 0x100) >> 8;
 
        /* Save the previous state. */
-       gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
-       gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
+       gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
+       gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
        fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
        fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
        fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -65,8 +65,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
        ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
 
        /* Prepare the DAC for load detection.  */
-       gpio->set(dev, DCB_GPIO_TVDAC1, true);
-       gpio->set(dev, DCB_GPIO_TVDAC0, true);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true);
 
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -111,8 +111,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
-       gpio->set(dev, DCB_GPIO_TVDAC1, gpio1);
-       gpio->set(dev, DCB_GPIO_TVDAC0, gpio0);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0);
 
        return sample;
 }
@@ -357,8 +357,6 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
 static void  nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 {
        struct drm_device *dev = encoder->dev;
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
        struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
        struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
 
@@ -383,8 +381,8 @@ static void  nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 
        nv_load_ptv(dev, regs, 200);
 
-       gpio->set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
-       gpio->set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
+       nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
 
        nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
 }
index e676b0d..c761538 100644 (file)
@@ -222,7 +222,7 @@ nv40_pm_gr_idle(void *data)
        return true;
 }
 
-void
+int
 nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -231,7 +231,7 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
        struct bit_entry M;
        u32 crtc_mask = 0;
        u8 sr1[2];
-       int i;
+       int i, ret = -EAGAIN;
 
        /* determine which CRTCs are active, fetch VGA_SR1 for each */
        for (i = 0; i < 2; i++) {
@@ -263,6 +263,8 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
        if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
                goto resume;
 
+       ret = 0;
+
        /* set engine clocks */
        nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
        nv_wr32(dev, 0x004004, info->npll_coef);
@@ -345,4 +347,48 @@ resume:
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
        kfree(info);
+       return ret;
+}
+
+int
+nv40_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
+{
+       if (line == 2) {
+               u32 reg = nv_rd32(dev, 0x0010f0);
+               if (reg & 0x80000000) {
+                       *duty = (reg & 0x7fff0000) >> 16;
+                       *divs = (reg & 0x00007fff);
+                       return 0;
+               }
+       } else
+       if (line == 9) {
+               u32 reg = nv_rd32(dev, 0x0015f4);
+               if (reg & 0x80000000) {
+                       *divs = nv_rd32(dev, 0x0015f8);
+                       *duty = (reg & 0x7fffffff);
+                       return 0;
+               }
+       } else {
+               NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
+               return -ENODEV;
+       }
+
+       return -EINVAL;
+}
+
+int
+nv40_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
+{
+       if (line == 2) {
+               nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
+       } else
+       if (line == 9) {
+               nv_wr32(dev, 0x0015f8, divs);
+               nv_wr32(dev, 0x0015f4, duty | 0x80000000);
+       } else {
+               NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
+               return -ENODEV;
+       }
+
+       return 0;
 }
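
The new nv40_pm_pwm_get()/nv40_pm_pwm_set() pair above exposes the fan PWM controls: for GPIO line 2 everything sits in a single register (0x0010f0) with bit 31 as the enable, the duty cycle in bits 30:16 and the divider in bits 14:0, while line 9 keeps the divider in 0x0015f8 and the duty plus enable bit in 0x0015f4. A small sketch of the line-2 packing, with made-up sample values:

#include <stdint.h>
#include <stdio.h>

/* pack divider/duty into the line-2 PWM register layout used above */
static uint32_t pwm2_pack(uint32_t divs, uint32_t duty)
{
	return 0x80000000 | (duty << 16) | divs;
}

/* reverse operation; returns -1 when the PWM is not enabled */
static int pwm2_unpack(uint32_t reg, uint32_t *divs, uint32_t *duty)
{
	if (!(reg & 0x80000000))
		return -1;
	*duty = (reg & 0x7fff0000) >> 16;
	*divs = (reg & 0x00007fff);
	return 0;
}

int main(void)
{
	uint32_t divs, duty, reg = pwm2_pack(0x0600, 0x0300);	/* sample values */

	if (pwm2_unpack(reg, &divs, &duty) == 0)
		printf("0x%08x -> divs 0x%04x duty 0x%04x\n", reg, divs, duty);
	return 0;
}
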
index 882080e..8f6c2ac 100644 (file)
@@ -132,33 +132,42 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 }
 
 static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 {
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int ret;
-
-       NV_DEBUG_KMS(dev, "\n");
+       struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
+       struct nouveau_connector *nv_connector;
+       struct drm_connector *connector;
+       int head = nv_crtc->index, ret;
+       u32 mode = 0x00;
 
-       ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
-       if (ret) {
-               NV_ERROR(dev, "no space while setting dither\n");
-               return ret;
+       nv_connector = nouveau_crtc_connector_get(nv_crtc);
+       connector = &nv_connector->base;
+       if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+               if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+                       mode = DITHERING_MODE_DYNAMIC2X2;
+       } else {
+               mode = nv_connector->dithering_mode;
        }
 
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
-       if (on)
-               OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
-       else
-               OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
+       if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+               if (connector->display_info.bpc >= 8)
+                       mode |= DITHERING_DEPTH_8BPC;
+       } else {
+               mode |= nv_connector->dithering_depth;
+       }
 
-       if (update) {
-               BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
-               OUT_RING(evo, 0);
-               FIRE_RING(evo);
+       ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
+       if (ret == 0) {
+               BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
+               OUT_RING  (evo, mode);
+               if (update) {
+                       BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+                       OUT_RING  (evo, 0);
+                       FIRE_RING (evo);
+               }
        }
 
-       return 0;
+       return ret;
 }
 
 struct nouveau_connector *
@@ -180,80 +189,103 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
 }
 
 static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
 {
-       struct nouveau_connector *nv_connector =
-               nouveau_crtc_connector_get(nv_crtc);
-       struct drm_device *dev = nv_crtc->base.dev;
+       struct nouveau_connector *nv_connector;
+       struct drm_crtc *crtc = &nv_crtc->base;
+       struct drm_device *dev = crtc->dev;
        struct nouveau_channel *evo = nv50_display(dev)->master;
-       struct drm_display_mode *native_mode = NULL;
-       struct drm_display_mode *mode = &nv_crtc->base.mode;
-       uint32_t outX, outY, horiz, vert;
-       int ret;
+       struct drm_display_mode *umode = &crtc->mode;
+       struct drm_display_mode *omode;
+       int scaling_mode, ret;
+       u32 ctrl = 0, oX, oY;
 
        NV_DEBUG_KMS(dev, "\n");
 
-       switch (scaling_mode) {
-       case DRM_MODE_SCALE_NONE:
-               break;
-       default:
-               if (!nv_connector || !nv_connector->native_mode) {
-                       NV_ERROR(dev, "No native mode, forcing panel scaling\n");
-                       scaling_mode = DRM_MODE_SCALE_NONE;
+       nv_connector = nouveau_crtc_connector_get(nv_crtc);
+       if (!nv_connector || !nv_connector->native_mode) {
+               NV_ERROR(dev, "no native mode, forcing panel scaling\n");
+               scaling_mode = DRM_MODE_SCALE_NONE;
+       } else {
+               scaling_mode = nv_connector->scaling_mode;
+       }
+
+       /* start off at the resolution we programmed the crtc for, this
+        * effectively handles NONE/FULL scaling
+        */
+       if (scaling_mode != DRM_MODE_SCALE_NONE)
+               omode = nv_connector->native_mode;
+       else
+               omode = umode;
+
+       oX = omode->hdisplay;
+       oY = omode->vdisplay;
+       if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+               oY *= 2;
+
+       /* add overscan compensation if necessary, will keep the aspect
+        * ratio the same as the backend mode unless overridden by the
+        * user setting both hborder and vborder properties.
+        */
+       if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+                            (nv_connector->underscan == UNDERSCAN_AUTO &&
+                             nv_connector->edid &&
+                             drm_detect_hdmi_monitor(nv_connector->edid)))) {
+               u32 bX = nv_connector->underscan_hborder;
+               u32 bY = nv_connector->underscan_vborder;
+               u32 aspect = (oY << 19) / oX;
+
+               if (bX) {
+                       oX -= (bX * 2);
+                       if (bY) oY -= (bY * 2);
+                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
                } else {
-                       native_mode = nv_connector->native_mode;
+                       oX -= (oX >> 4) + 32;
+                       if (bY) oY -= (bY * 2);
+                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
                }
-               break;
        }
 
+       /* handle CENTER/ASPECT scaling, taking into account the areas
+        * removed already for overscan compensation
+        */
        switch (scaling_mode) {
+       case DRM_MODE_SCALE_CENTER:
+               oX = min((u32)umode->hdisplay, oX);
+               oY = min((u32)umode->vdisplay, oY);
+               /* fall-through */
        case DRM_MODE_SCALE_ASPECT:
-               horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
-               vert = (native_mode->vdisplay << 19) / mode->vdisplay;
-
-               if (vert > horiz) {
-                       outX = (mode->hdisplay * horiz) >> 19;
-                       outY = (mode->vdisplay * horiz) >> 19;
+               if (oY < oX) {
+                       u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+                       oX = ((oY * aspect) + (aspect / 2)) >> 19;
                } else {
-                       outX = (mode->hdisplay * vert) >> 19;
-                       outY = (mode->vdisplay * vert) >> 19;
+                       u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+                       oY = ((oX * aspect) + (aspect / 2)) >> 19;
                }
                break;
-       case DRM_MODE_SCALE_FULLSCREEN:
-               outX = native_mode->hdisplay;
-               outY = native_mode->vdisplay;
-               break;
-       case DRM_MODE_SCALE_CENTER:
-       case DRM_MODE_SCALE_NONE:
        default:
-               outX = mode->hdisplay;
-               outY = mode->vdisplay;
                break;
        }
 
-       ret = RING_SPACE(evo, update ? 7 : 5);
+       if (umode->hdisplay != oX || umode->vdisplay != oY ||
+           umode->flags & DRM_MODE_FLAG_INTERLACE ||
+           umode->flags & DRM_MODE_FLAG_DBLSCAN)
+               ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
+
+       ret = RING_SPACE(evo, 5);
        if (ret)
                return ret;
 
-       /* Got a better name for SCALER_ACTIVE? */
-       /* One day i've got to really figure out why this is needed. */
        BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
-       if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
-           (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
-           mode->hdisplay != outX || mode->vdisplay != outY) {
-               OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
-       } else {
-               OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
-       }
-
+       OUT_RING  (evo, ctrl);
        BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
-       OUT_RING(evo, outY << 16 | outX);
-       OUT_RING(evo, outY << 16 | outX);
+       OUT_RING  (evo, oY << 16 | oX);
+       OUT_RING  (evo, oY << 16 | oX);
 
        if (update) {
-               BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
-               OUT_RING(evo, 0);
-               FIRE_RING(evo);
+               nv50_display_flip_stop(crtc);
+               nv50_display_sync(dev);
+               nv50_display_flip_next(crtc, crtc->fb, NULL);
        }
 
        return 0;
@@ -333,7 +365,6 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
        nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
        nouveau_bo_unmap(nv_crtc->cursor.nvbo);
        nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-       kfree(nv_crtc->mode);
        kfree(nv_crtc);
 }
 
@@ -441,39 +472,6 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 }
 
-static int
-nv50_crtc_wait_complete(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
-       struct nv50_display *disp = nv50_display(dev);
-       struct nouveau_channel *evo = disp->master;
-       u64 start;
-       int ret;
-
-       ret = RING_SPACE(evo, 6);
-       if (ret)
-               return ret;
-       BEGIN_RING(evo, 0, 0x0084, 1);
-       OUT_RING  (evo, 0x80000000);
-       BEGIN_RING(evo, 0, 0x0080, 1);
-       OUT_RING  (evo, 0);
-       BEGIN_RING(evo, 0, 0x0084, 1);
-       OUT_RING  (evo, 0x00000000);
-
-       nv_wo32(disp->ntfy, 0x000, 0x00000000);
-       FIRE_RING (evo);
-
-       start = ptimer->read(dev);
-       do {
-               if (nv_ro32(disp->ntfy, 0x000))
-                       return 0;
-       } while (ptimer->read(dev) - start < 2000000000ULL);
-
-       return -EBUSY;
-}
-
 static void
 nv50_crtc_prepare(struct drm_crtc *crtc)
 {
@@ -497,7 +495,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
 
        nv50_crtc_blank(nv_crtc, false);
        drm_vblank_post_modeset(dev, nv_crtc->index);
-       nv50_crtc_wait_complete(crtc);
+       nv50_display_sync(dev);
        nv50_display_flip_next(crtc, crtc->fb, NULL);
 }
 
@@ -593,90 +591,76 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 }
 
 static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
-                  struct drm_display_mode *adjusted_mode, int x, int y,
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+                  struct drm_display_mode *mode, int x, int y,
                   struct drm_framebuffer *old_fb)
 {
        struct drm_device *dev = crtc->dev;
        struct nouveau_channel *evo = nv50_display(dev)->master;
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct nouveau_connector *nv_connector = NULL;
-       uint32_t hsync_dur,  vsync_dur, hsync_start_to_end, vsync_start_to_end;
-       uint32_t hunk1, vunk1, vunk2a, vunk2b;
+       u32 head = nv_crtc->index * 0x400;
+       u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+       u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+       u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+       u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+       u32 vblan2e = 0, vblan2s = 1;
        int ret;
 
-       /* Find the connector attached to this CRTC */
-       nv_connector = nouveau_crtc_connector_get(nv_crtc);
-
-       *nv_crtc->mode = *adjusted_mode;
-
-       NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+       /* hw timing description looks like this:
+        *
+        * <sync> <back porch> <---------display---------> <front porch>
+        * ______
+        *       |____________|---------------------------|____________|
+        *
+        *       ^ synce      ^ blanke                    ^ blanks     ^ active
+        *
+        * interlaced modes also have 2 additional values pointing at the end
+        * and start of the next field's blanking period.
+        */
 
-       hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
-       vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
-       hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
-       vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
-       /* I can't give this a proper name, anyone else can? */
-       hunk1 = adjusted_mode->htotal -
-               adjusted_mode->hsync_start + adjusted_mode->hdisplay;
-       vunk1 = adjusted_mode->vtotal -
-               adjusted_mode->vsync_start + adjusted_mode->vdisplay;
-       /* Another strange value, this time only for interlaced adjusted_modes. */
-       vunk2a = 2 * adjusted_mode->vtotal -
-                adjusted_mode->vsync_start + adjusted_mode->vdisplay;
-       vunk2b = adjusted_mode->vtotal -
-                adjusted_mode->vsync_start + adjusted_mode->vtotal;
-
-       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               vsync_dur /= 2;
-               vsync_start_to_end  /= 2;
-               vunk1 /= 2;
-               vunk2a /= 2;
-               vunk2b /= 2;
-               /* magic */
-               if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
-                       vsync_start_to_end -= 1;
-                       vunk1 -= 1;
-                       vunk2a -= 1;
-                       vunk2b -= 1;
-               }
+       hactive = mode->htotal;
+       hsynce  = mode->hsync_end - mode->hsync_start - 1;
+       hbackp  = mode->htotal - mode->hsync_end;
+       hblanke = hsynce + hbackp;
+       hfrontp = mode->hsync_start - mode->hdisplay;
+       hblanks = mode->htotal - hfrontp - 1;
+
+       vactive = mode->vtotal * vscan / ilace;
+       vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+       vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+       vblanke = vsynce + vbackp;
+       vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+       vblanks = vactive - vfrontp - 1;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               vblan2e = vactive + vsynce + vbackp;
+               vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+               vactive = (vactive * 2) + 1;
        }
 
-       ret = RING_SPACE(evo, 17);
-       if (ret)
-               return ret;
-
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
-       OUT_RING(evo, adjusted_mode->clock | 0x800000);
-       OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
-
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
-       OUT_RING(evo, 0);
-       OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
-       OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
-       OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
-                       (hsync_start_to_end - 1));
-       OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
-
-       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
-               OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
-       } else {
-               OUT_RING(evo, 0);
-               OUT_RING(evo, 0);
+       ret = RING_SPACE(evo, 18);
+       if (ret == 0) {
+               BEGIN_RING(evo, 0, 0x0804 + head, 2);
+               OUT_RING  (evo, 0x00800000 | mode->clock);
+               OUT_RING  (evo, (ilace == 2) ? 2 : 0);
+               BEGIN_RING(evo, 0, 0x0810 + head, 6);
+               OUT_RING  (evo, 0x00000000); /* border colour */
+               OUT_RING  (evo, (vactive << 16) | hactive);
+               OUT_RING  (evo, ( vsynce << 16) | hsynce);
+               OUT_RING  (evo, (vblanke << 16) | hblanke);
+               OUT_RING  (evo, (vblanks << 16) | hblanks);
+               OUT_RING  (evo, (vblan2e << 16) | vblan2s);
+               BEGIN_RING(evo, 0, 0x082c + head, 1);
+               OUT_RING  (evo, 0x00000000);
+               BEGIN_RING(evo, 0, 0x0900 + head, 1);
+               OUT_RING  (evo, 0x00000311); /* makes sync channel work */
+               BEGIN_RING(evo, 0, 0x08c8 + head, 1);
+               OUT_RING  (evo, (umode->vdisplay << 16) | umode->hdisplay);
+               BEGIN_RING(evo, 0, 0x08d4 + head, 1);
+               OUT_RING  (evo, 0x00000000); /* screen position */
        }
 
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
-       OUT_RING(evo, 0);
-
-       /* This is the actual resolution of the mode. */
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
-       OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
-       OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
-
-       nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
-       nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
+       nv_crtc->set_dither(nv_crtc, false);
+       nv_crtc->set_scale(nv_crtc, false);
 
        return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
 }
@@ -692,7 +676,7 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        if (ret)
                return ret;
 
-       ret = nv50_crtc_wait_complete(crtc);
+       ret = nv50_display_sync(crtc->dev);
        if (ret)
                return ret;
 
@@ -711,7 +695,7 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
        if (ret)
                return ret;
 
-       return nv50_crtc_wait_complete(crtc);
+       return nv50_display_sync(crtc->dev);
 }
 
 static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -737,12 +721,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
        if (!nv_crtc)
                return -ENOMEM;
 
-       nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
-       if (!nv_crtc->mode) {
-               kfree(nv_crtc);
-               return -ENOMEM;
-       }
-
        /* Default CLUT parameters, will be activated on the hw upon
         * first mode set.
         */
@@ -764,7 +742,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
        }
 
        if (ret) {
-               kfree(nv_crtc->mode);
                kfree(nv_crtc);
                return ret;
        }
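
The timing comment in nv50_crtc_mode_set() above describes how the hardware wants a mode expressed: counters for sync end, blank end, blank start and active, rather than the sync-start/sync-end/total values carried in a drm_display_mode. A worked user-space sketch of that derivation for a progressive mode (vscan = ilace = 1), using the standard CEA 1920x1080@60 timings purely as sample input:

#include <stdio.h>

struct mode {
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	struct mode m = { 1920, 2008, 2052, 2200, 1080, 1084, 1089, 1125 };

	/* horizontal: same arithmetic as the hunk above */
	int hsynce  = m.hsync_end - m.hsync_start - 1;
	int hbackp  = m.htotal - m.hsync_end;
	int hblanke = hsynce + hbackp;
	int hfrontp = m.hsync_start - m.hdisplay;
	int hblanks = m.htotal - hfrontp - 1;

	/* vertical, with the vscan/ilace factors dropped (both 1 here) */
	int vsynce  = m.vsync_end - m.vsync_start - 1;
	int vbackp  = m.vtotal - m.vsync_end;
	int vblanke = vsynce + vbackp;
	int vfrontp = m.vsync_start - m.vdisplay;
	int vblanks = m.vtotal - vfrontp - 1;

	printf("h: active %d synce %d blanke %d blanks %d\n",
	       m.htotal, hsynce, hblanke, hblanks);
	printf("v: active %d synce %d blanke %d blanks %d\n",
	       m.vtotal, vsynce, vblanke, vblanks);
	return 0;
}
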
index 808f3ec..a0f2beb 100644 (file)
@@ -200,11 +200,6 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 }
 
 static void
-nv50_dac_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void
 nv50_dac_commit(struct drm_encoder *encoder)
 {
 }
@@ -266,7 +261,7 @@ static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
        .save = nv50_dac_save,
        .restore = nv50_dac_restore,
        .mode_fixup = nv50_dac_mode_fixup,
-       .prepare = nv50_dac_prepare,
+       .prepare = nv50_dac_disconnect,
        .commit = nv50_dac_commit,
        .mode_set = nv50_dac_mode_set,
        .get_crtc = nv50_dac_crtc_get,
index 06de250..7ba28e0 100644 (file)
@@ -50,9 +50,53 @@ nv50_sor_nr(struct drm_device *dev)
        return 4;
 }
 
+static int
+evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
+{
+       int ret = 0;
+       nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000001);
+       nv_wr32(dev, 0x610304 + (ch * 0x08), data);
+       nv_wr32(dev, 0x610300 + (ch * 0x08), 0x80000001 | mthd);
+       if (!nv_wait(dev, 0x610300 + (ch * 0x08), 0x80000000, 0x00000000))
+               ret = -EBUSY;
+       if (ret || (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO))
+               NV_INFO(dev, "EvoPIO: %d 0x%04x 0x%08x\n", ch, mthd, data);
+       nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000000);
+       return ret;
+}
+
 int
 nv50_display_early_init(struct drm_device *dev)
 {
+       u32 ctrl = nv_rd32(dev, 0x610200);
+       int i;
+
+       /* check if master evo channel is already active, as good a sign as any
+        * that the display engine is in a weird state (hibernate/kexec), if
+        * it is, do our best to reset the display engine...
+        */
+       if ((ctrl & 0x00000003) == 0x00000003) {
+               NV_INFO(dev, "PDISP: EVO(0) 0x%08x, resetting...\n", ctrl);
+
+               /* deactivate both heads first, PDISP will disappear forever
+                * (well, until you power cycle) on some boards as soon as
+                * PMC_ENABLE is hit unless they are..
+                */
+               for (i = 0; i < 2; i++) {
+                       evo_icmd(dev, 0, 0x0880 + (i * 0x400), 0x05000000);
+                       evo_icmd(dev, 0, 0x089c + (i * 0x400), 0);
+                       evo_icmd(dev, 0, 0x0840 + (i * 0x400), 0);
+                       evo_icmd(dev, 0, 0x0844 + (i * 0x400), 0);
+                       evo_icmd(dev, 0, 0x085c + (i * 0x400), 0);
+                       evo_icmd(dev, 0, 0x0874 + (i * 0x400), 0);
+               }
+               evo_icmd(dev, 0, 0x0080, 0);
+
+               /* reset PDISP */
+               nv_mask(dev, 0x000200, 0x40000000, 0x00000000);
+               nv_mask(dev, 0x000200, 0x40000000, 0x40000000);
+       }
+
        return 0;
 }
 
@@ -62,11 +106,40 @@ nv50_display_late_takedown(struct drm_device *dev)
 }
 
 int
-nv50_display_init(struct drm_device *dev)
+nv50_display_sync(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       struct drm_connector *connector;
+       struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+       struct nv50_display *disp = nv50_display(dev);
+       struct nouveau_channel *evo = disp->master;
+       u64 start;
+       int ret;
+
+       ret = RING_SPACE(evo, 6);
+       if (ret == 0) {
+               BEGIN_RING(evo, 0, 0x0084, 1);
+               OUT_RING  (evo, 0x80000000);
+               BEGIN_RING(evo, 0, 0x0080, 1);
+               OUT_RING  (evo, 0);
+               BEGIN_RING(evo, 0, 0x0084, 1);
+               OUT_RING  (evo, 0x00000000);
+
+               nv_wo32(disp->ntfy, 0x000, 0x00000000);
+               FIRE_RING (evo);
+
+               start = ptimer->read(dev);
+               do {
+                       if (nv_ro32(disp->ntfy, 0x000))
+                               return 0;
+               } while (ptimer->read(dev) - start < 2000000000ULL);
+       }
+
+       return -EBUSY;
+}
+
+int
+nv50_display_init(struct drm_device *dev)
+{
        struct nouveau_channel *evo;
        int ret, i;
        u32 val;
@@ -161,16 +234,6 @@ nv50_display_init(struct drm_device *dev)
                     NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
                     NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
 
-       /* enable hotplug interrupts */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               struct nouveau_connector *conn = nouveau_connector(connector);
-
-               if (conn->dcb->gpio_tag == 0xff)
-                       continue;
-
-               pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
-       }
-
        ret = nv50_evo_init(dev);
        if (ret)
                return ret;
@@ -178,36 +241,19 @@ nv50_display_init(struct drm_device *dev)
 
        nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
 
-       ret = RING_SPACE(evo, 15);
+       ret = RING_SPACE(evo, 3);
        if (ret)
                return ret;
        BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
-       OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
-       OUT_RING(evo, NvEvoSync);
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
-       OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
-       OUT_RING(evo, 0);
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
-       OUT_RING(evo, 0);
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
-       OUT_RING(evo, 0);
-       /* required to make display sync channels not hate life */
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
-       OUT_RING  (evo, 0x00000311);
-       BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
-       OUT_RING  (evo, 0x00000311);
-       FIRE_RING(evo);
-       if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
-               NV_ERROR(dev, "evo pushbuf stalled\n");
-
+       OUT_RING  (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
+       OUT_RING  (evo, NvEvoSync);
 
-       return 0;
+       return nv50_display_sync(dev);
 }
 
-static int nv50_display_disable(struct drm_device *dev)
+void
+nv50_display_fini(struct drm_device *dev)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_display *disp = nv50_display(dev);
        struct nouveau_channel *evo = disp->master;
        struct drm_crtc *drm_crtc;
@@ -270,18 +316,10 @@ static int nv50_display_disable(struct drm_device *dev)
 
        /* disable interrupts. */
        nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
-
-       /* disable hotplug interrupts */
-       nv_wr32(dev, 0xe054, 0xffffffff);
-       nv_wr32(dev, 0xe050, 0x00000000);
-       if (dev_priv->chipset >= 0x90) {
-               nv_wr32(dev, 0xe074, 0xffffffff);
-               nv_wr32(dev, 0xe070, 0x00000000);
-       }
-       return 0;
 }
 
-int nv50_display_create(struct drm_device *dev)
+int
+nv50_display_create(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct dcb_table *dcb = &dev_priv->vbios.dcb;
@@ -341,7 +379,7 @@ int nv50_display_create(struct drm_device *dev)
        tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
        nouveau_irq_register(dev, 26, nv50_display_isr);
 
-       ret = nv50_display_init(dev);
+       ret = nv50_evo_create(dev);
        if (ret) {
                nv50_display_destroy(dev);
                return ret;
@@ -357,7 +395,7 @@ nv50_display_destroy(struct drm_device *dev)
 
        NV_DEBUG_KMS(dev, "\n");
 
-       nv50_display_disable(dev);
+       nv50_evo_destroy(dev);
        nouveau_irq_unregister(dev, 26);
        kfree(disp);
 }
@@ -521,7 +559,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
                } else {
                        /* determine number of lvds links */
                        if (nv_connector && nv_connector->edid &&
-                           nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
+                           nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
                                /* http://www.spwg.org */
                                if (((u8 *)nv_connector->edid)[121] == 2)
                                        script |= 0x0100;
@@ -722,8 +760,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
        if (crtc >= 0) {
                pclk  = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
                pclk &= 0x003fffff;
-
-               nv50_crtc_set_clock(dev, crtc, pclk);
+               if (pclk)
+                       nv50_crtc_set_clock(dev, crtc, pclk);
 
                tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
                tmp &= ~0x000000f;
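
nv50_display_sync() above replaces the per-CRTC nv50_crtc_wait_complete(): it pushes a notifier write through the master EVO channel, fires the ring, then busy-polls the notifier for up to two seconds before giving up with -EBUSY. A rough user-space sketch of that poll-with-timeout pattern (wait_notify() and the two-second budget are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t ns_now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* returns 0 once *ntfy becomes non-zero, -1 after roughly two seconds */
static int wait_notify(volatile uint32_t *ntfy)
{
	uint64_t start = ns_now();

	do {
		if (*ntfy)
			return 0;
	} while (ns_now() - start < 2000000000ull);
	return -1;
}

int main(void)
{
	volatile uint32_t ntfy = 1;	/* pretend the channel already signalled */

	printf("sync: %d\n", wait_notify(&ntfy));
	return 0;
}
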
index c2da503..95874f7 100644 (file)
@@ -69,14 +69,18 @@ int nv50_display_early_init(struct drm_device *dev);
 void nv50_display_late_takedown(struct drm_device *dev);
 int nv50_display_create(struct drm_device *dev);
 int nv50_display_init(struct drm_device *dev);
+void nv50_display_fini(struct drm_device *dev);
 void nv50_display_destroy(struct drm_device *dev);
 int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
 int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
 
+int  nv50_display_sync(struct drm_device *);
 int  nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
                            struct nouveau_channel *chan);
 void nv50_display_flip_stop(struct drm_crtc *);
 
+int  nv50_evo_create(struct drm_device *dev);
+void nv50_evo_destroy(struct drm_device *dev);
 int  nv50_evo_init(struct drm_device *dev);
 void nv50_evo_fini(struct drm_device *dev);
 void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
index c99d975..9b962e9 100644 (file)
@@ -218,7 +218,7 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
        }
 }
 
-static void
+void
 nv50_evo_destroy(struct drm_device *dev)
 {
        struct nv50_display *disp = nv50_display(dev);
@@ -235,7 +235,7 @@ nv50_evo_destroy(struct drm_device *dev)
        nv50_evo_channel_del(&disp->master);
 }
 
-static int
+int
 nv50_evo_create(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -388,12 +388,6 @@ nv50_evo_init(struct drm_device *dev)
        struct nv50_display *disp = nv50_display(dev);
        int ret, i;
 
-       if (!disp->master) {
-               ret = nv50_evo_create(dev);
-               if (ret)
-                       return ret;
-       }
-
        ret = nv50_evo_channel_init(disp->master);
        if (ret)
                return ret;
@@ -420,6 +414,4 @@ nv50_evo_fini(struct drm_device *dev)
 
        if (disp->master)
                nv50_evo_channel_fini(disp->master);
-
-       nv50_evo_destroy(dev);
 }
index c34a074..3bc2a56 100644 (file)
@@ -230,6 +230,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *ramfc = NULL;
+       uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
        unsigned long flags;
        int ret;
 
@@ -280,8 +281,9 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
        nv_wo32(ramfc, 0x7c, 0x30000001);
        nv_wo32(ramfc, 0x78, 0x00000000);
        nv_wo32(ramfc, 0x3c, 0x403f6078);
-       nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
-       nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);
+       nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
+       nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
+                drm_order(chan->dma.ib_max + 1) << 16);
 
        if (dev_priv->chipset != 0x50) {
                nv_wo32(chan->ramin, 0, chan->id);
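
The nv50_fifo hunk above widens the indirect-buffer address to 64 bits: RAMFC+0x50 now takes the low word of the IB offset, and RAMFC+0x54 the high word OR'd with drm_order(ib_max + 1) in bits 16 and up. A quick sketch of that split (order_base_2() here is a stand-in for the kernel's drm_order(), and the offset/ib_max values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* smallest o such that (1 << o) >= n, i.e. what drm_order() computes */
static unsigned int order_base_2(uint32_t n)
{
	unsigned int o = 0;

	while ((1u << o) < n)
		o++;
	return o;
}

int main(void)
{
	uint64_t ib_offset = (0x1234ull << 32) | 0x56789000;
	uint32_t ib_max = 0x1fff;		/* an 8192-entry ring */
	uint32_t lo = (uint32_t)ib_offset;
	uint32_t hi = (uint32_t)(ib_offset >> 32) |
		      (order_base_2(ib_max + 1) << 16);

	printf("ramfc+0x50 = 0x%08x, ramfc+0x54 = 0x%08x\n", lo, hi);
	return 0;
}
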
index 793a5cc..f429e6a 100644 (file)
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_hw.h"
+#include "nouveau_gpio.h"
 
 #include "nv50_display.h"
 
-static void nv50_gpio_isr(struct drm_device *dev);
-static void nv50_gpio_isr_bh(struct work_struct *work);
-
-struct nv50_gpio_priv {
-       struct list_head handlers;
-       spinlock_t lock;
-};
-
-struct nv50_gpio_handler {
-       struct drm_device *dev;
-       struct list_head head;
-       struct work_struct work;
-       bool inhibit;
-
-       struct dcb_gpio_entry *gpio;
-
-       void (*handler)(void *data, int state);
-       void *data;
-};
-
 static int
-nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
+nv50_gpio_location(int line, u32 *reg, u32 *shift)
 {
        const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
 
-       if (gpio->line >= 32)
+       if (line >= 32)
                return -EINVAL;
 
-       *reg = nv50_gpio_reg[gpio->line >> 3];
-       *shift = (gpio->line & 7) << 2;
+       *reg = nv50_gpio_reg[line >> 3];
+       *shift = (line & 7) << 2;
        return 0;
 }
 
 int
-nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
 {
-       struct dcb_gpio_entry *gpio;
-       uint32_t r, s, v;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return -ENOENT;
+       u32 reg, shift;
 
-       if (nv50_gpio_location(gpio, &r, &s))
+       if (nv50_gpio_location(line, &reg, &shift))
                return -EINVAL;
 
-       v = nv_rd32(dev, r) >> (s + 2);
-       return ((v & 1) == (gpio->state[1] & 1));
+       nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
+       return 0;
 }
 
 int
-nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nv50_gpio_sense(struct drm_device *dev, int line)
 {
-       struct dcb_gpio_entry *gpio;
-       uint32_t r, s, v;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return -ENOENT;
+       u32 reg, shift;
 
-       if (nv50_gpio_location(gpio, &r, &s))
+       if (nv50_gpio_location(line, &reg, &shift))
                return -EINVAL;
 
-       v  = nv_rd32(dev, r) & ~(0x3 << s);
-       v |= (gpio->state[state] ^ 2) << s;
-       nv_wr32(dev, r, v);
-       return 0;
+       return !!(nv_rd32(dev, reg) & (4 << shift));
 }
 
-int
-nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+void
+nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
 {
-       struct dcb_gpio_entry *gpio;
-       u32 v;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return -ENOENT;
+       u32 reg  = line < 16 ? 0xe050 : 0xe070;
+       u32 mask = 0x00010001 << (line & 0xf);
 
-       v  = nv_rd32(dev, 0x00d610 + (gpio->line * 4));
-       v &= 0x00004000;
-       return (!!v == (gpio->state[1] & 1));
+       nv_wr32(dev, reg + 4, mask);
+       nv_mask(dev, reg + 0, mask, on ? mask : 0);
 }
 
 int
-nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
 {
-       struct dcb_gpio_entry *gpio;
-       u32 v;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return -ENOENT;
-
-       v = gpio->state[state] ^ 2;
-
-       nv_mask(dev, 0x00d610 + (gpio->line * 4), 0x00003000, v << 12);
+       u32 data = ((dir ^ 1) << 13) | (out << 12);
+       nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
+       nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
        return 0;
 }
 
 int
-nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
-                      void (*handler)(void *, int), void *data)
+nvd0_gpio_sense(struct drm_device *dev, int line)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       struct nv50_gpio_priv *priv = pgpio->priv;
-       struct nv50_gpio_handler *gpioh;
-       struct dcb_gpio_entry *gpio;
-       unsigned long flags;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return -ENOENT;
-
-       gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
-       if (!gpioh)
-               return -ENOMEM;
-
-       INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
-       gpioh->dev  = dev;
-       gpioh->gpio = gpio;
-       gpioh->handler = handler;
-       gpioh->data = data;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       list_add(&gpioh->head, &priv->handlers);
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return 0;
+       return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
 }
 
-void
-nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
-                        void (*handler)(void *, int), void *data)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       struct nv50_gpio_priv *priv = pgpio->priv;
-       struct nv50_gpio_handler *gpioh, *tmp;
-       struct dcb_gpio_entry *gpio;
-       LIST_HEAD(tofree);
-       unsigned long flags;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
-               if (gpioh->gpio != gpio ||
-                   gpioh->handler != handler ||
-                   gpioh->data != data)
-                       continue;
-               list_move(&gpioh->head, &tofree);
-       }
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
-               flush_work_sync(&gpioh->work);
-               kfree(gpioh);
-       }
-}
-
-bool
-nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
-{
-       struct dcb_gpio_entry *gpio;
-       u32 reg, mask;
-
-       gpio = nouveau_bios_gpio_entry(dev, tag);
-       if (!gpio)
-               return false;
-
-       reg  = gpio->line < 16 ? 0xe050 : 0xe070;
-       mask = 0x00010001 << (gpio->line & 0xf);
-
-       nv_wr32(dev, reg + 4, mask);
-       reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
-       return (reg & mask) == mask;
-}
-
-static int
-nv50_gpio_create(struct drm_device *dev)
+static void
+nv50_gpio_isr(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       struct nv50_gpio_priv *priv;
-
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
+       u32 intr0, intr1 = 0;
+       u32 hi, lo;
 
-       INIT_LIST_HEAD(&priv->handlers);
-       spin_lock_init(&priv->lock);
-       pgpio->priv = priv;
-       return 0;
-}
+       intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+       if (dev_priv->chipset >= 0x90)
+               intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
 
-static void
-nv50_gpio_destroy(struct drm_device *dev)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+       hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+       lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+       nouveau_gpio_isr(dev, 0, hi | lo);
 
-       kfree(pgpio->priv);
-       pgpio->priv = NULL;
+       nv_wr32(dev, 0xe054, intr0);
+       if (dev_priv->chipset >= 0x90)
+               nv_wr32(dev, 0xe074, intr1);
 }
 
 int
 nv50_gpio_init(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       int ret;
-
-       if (!pgpio->priv) {
-               ret = nv50_gpio_create(dev);
-               if (ret)
-                       return ret;
-       }
 
        /* disable, and ack any pending gpio interrupts */
        nv_wr32(dev, 0xe050, 0x00000000);
@@ -270,64 +136,4 @@ nv50_gpio_fini(struct drm_device *dev)
        if (dev_priv->chipset >= 0x90)
                nv_wr32(dev, 0xe070, 0x00000000);
        nouveau_irq_unregister(dev, 21);
-
-       nv50_gpio_destroy(dev);
-}
-
-static void
-nv50_gpio_isr_bh(struct work_struct *work)
-{
-       struct nv50_gpio_handler *gpioh =
-               container_of(work, struct nv50_gpio_handler, work);
-       struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       struct nv50_gpio_priv *priv = pgpio->priv;
-       unsigned long flags;
-       int state;
-
-       state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
-       if (state < 0)
-               return;
-
-       gpioh->handler(gpioh->data, state);
-
-       spin_lock_irqsave(&priv->lock, flags);
-       gpioh->inhibit = false;
-       spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void
-nv50_gpio_isr(struct drm_device *dev)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-       struct nv50_gpio_priv *priv = pgpio->priv;
-       struct nv50_gpio_handler *gpioh;
-       u32 intr0, intr1 = 0;
-       u32 hi, lo, ch;
-
-       intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
-       if (dev_priv->chipset >= 0x90)
-               intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
-
-       hi = (intr0 & 0x0000ffff) | (intr1 << 16);
-       lo = (intr0 >> 16) | (intr1 & 0xffff0000);
-       ch = hi | lo;
-
-       nv_wr32(dev, 0xe054, intr0);
-       if (dev_priv->chipset >= 0x90)
-               nv_wr32(dev, 0xe074, intr1);
-
-       spin_lock(&priv->lock);
-       list_for_each_entry(gpioh, &priv->handlers, head) {
-               if (!(ch & (1 << gpioh->gpio->line)))
-                       continue;
-
-               if (gpioh->inhibit)
-                       continue;
-               gpioh->inhibit = true;
-
-               schedule_work(&gpioh->work);
-       }
-       spin_unlock(&priv->lock);
 }
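
With the handler list gone, the reworked nv50_gpio_isr() above only has to fold the two interrupt status words into a single 32-bit "which lines changed" mask before handing it to nouveau_gpio_isr(): intr0 covers lines 0-15 and intr1 lines 16-31, each word carrying one event bit per line in its low half and one in its high half. A standalone sketch of that folding (fold_gpio_intr() is a made-up name and the sample inputs are arbitrary):

#include <stdint.h>
#include <stdio.h>

static uint32_t fold_gpio_intr(uint32_t intr0, uint32_t intr1)
{
	/* move intr1's halves up to lines 16-31, then OR the "hi" and
	 * "lo" event halves together so each line ends up as one bit */
	uint32_t hi = (intr0 & 0x0000ffff) | (intr1 << 16);
	uint32_t lo = (intr0 >> 16) | (intr1 & 0xffff0000);

	return hi | lo;
}

int main(void)
{
	/* a "hi" event on line 4 and a "lo" event on line 20 */
	printf("changed: 0x%08x\n", fold_gpio_intr(0x00000010, 0x00100000));
	return 0;
}
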
index ac601f7..33d5711 100644 (file)
@@ -616,9 +616,9 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
                        }
                        break;
                case 7: /* MP error */
-                       if (ustatus & 0x00010000) {
+                       if (ustatus & 0x04030000) {
                                nv50_pgraph_mp_trap(dev, i, display);
-                               ustatus &= ~0x00010000;
+                               ustatus &= ~0x04030000;
                        }
                        break;
                case 8: /* TPDMA error */
index 3d5a86b..0393721 100644 (file)
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_bios.h"
+#include "nouveau_hw.h"
 #include "nouveau_pm.h"
+#include "nouveau_hwsq.h"
 
-struct nv50_pm_state {
-       struct nouveau_pm_level *perflvl;
-       struct pll_lims pll;
-       enum pll_types type;
-       int N, M, P;
+enum clk_src {
+       clk_src_crystal,
+       clk_src_href,
+       clk_src_hclk,
+       clk_src_hclkm3,
+       clk_src_hclkm3d2,
+       clk_src_host,
+       clk_src_nvclk,
+       clk_src_sclk,
+       clk_src_mclk,
+       clk_src_vdec,
+       clk_src_dom6
 };
 
+static u32 read_clk(struct drm_device *, enum clk_src);
+
+static u32
+read_div(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       switch (dev_priv->chipset) {
+       case 0x50: /* it exists, but only has bit 31, not the dividers.. */
+       case 0x84:
+       case 0x86:
+       case 0x98:
+       case 0xa0:
+               return nv_rd32(dev, 0x004700);
+       case 0x92:
+       case 0x94:
+       case 0x96:
+               return nv_rd32(dev, 0x004800);
+       default:
+               return 0x00000000;
+       }
+}
+
+static u32
+read_pll_src(struct drm_device *dev, u32 base)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 coef, ref = read_clk(dev, clk_src_crystal);
+       u32 rsel = nv_rd32(dev, 0x00e18c);
+       int P, N, M, id;
+
+       switch (dev_priv->chipset) {
+       case 0x50:
+       case 0xa0:
+               switch (base) {
+               case 0x4020:
+               case 0x4028: id = !!(rsel & 0x00000004); break;
+               case 0x4008: id = !!(rsel & 0x00000008); break;
+               case 0x4030: id = 0; break;
+               default:
+                       NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+                       return 0;
+               }
+
+               coef = nv_rd32(dev, 0x00e81c + (id * 0x0c));
+               ref *=  (coef & 0x01000000) ? 2 : 4;
+               P    =  (coef & 0x00070000) >> 16;
+               N    = ((coef & 0x0000ff00) >> 8) + 1;
+               M    = ((coef & 0x000000ff) >> 0) + 1;
+               break;
+       case 0x84:
+       case 0x86:
+       case 0x92:
+               coef = nv_rd32(dev, 0x00e81c);
+               P    = (coef & 0x00070000) >> 16;
+               N    = (coef & 0x0000ff00) >> 8;
+               M    = (coef & 0x000000ff) >> 0;
+               break;
+       case 0x94:
+       case 0x96:
+       case 0x98:
+               rsel = nv_rd32(dev, 0x00c050);
+               switch (base) {
+               case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
+               case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
+               case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
+               case 0x4030: rsel = 3; break;
+               default:
+                       NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+                       return 0;
+               }
+
+               switch (rsel) {
+               case 0: id = 1; break;
+               case 1: return read_clk(dev, clk_src_crystal);
+               case 2: return read_clk(dev, clk_src_href);
+               case 3: id = 0; break;
+               }
+
+               coef =  nv_rd32(dev, 0x00e81c + (id * 0x28));
+               P    = (nv_rd32(dev, 0x00e824 + (id * 0x28)) >> 16) & 7;
+               P   += (coef & 0x00070000) >> 16;
+               N    = (coef & 0x0000ff00) >> 8;
+               M    = (coef & 0x000000ff) >> 0;
+               break;
+       default:
+               BUG_ON(1);
+       }
+
+       if (M)
+               return (ref * N / M) >> P;
+       return 0;
+}
+
+static u32
+read_pll_ref(struct drm_device *dev, u32 base)
+{
+       u32 src, mast = nv_rd32(dev, 0x00c040);
+
+       switch (base) {
+       case 0x004028:
+               src = !!(mast & 0x00200000);
+               break;
+       case 0x004020:
+               src = !!(mast & 0x00400000);
+               break;
+       case 0x004008:
+               src = !!(mast & 0x00010000);
+               break;
+       case 0x004030:
+               src = !!(mast & 0x02000000);
+               break;
+       case 0x00e810:
+               return read_clk(dev, clk_src_crystal);
+       default:
+               NV_ERROR(dev, "bad pll 0x%06x\n", base);
+               return 0;
+       }
+
+       if (src)
+               return read_clk(dev, clk_src_href);
+       return read_pll_src(dev, base);
+}
+
+static u32
+read_pll(struct drm_device *dev, u32 base)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 mast = nv_rd32(dev, 0x00c040);
+       u32 ctrl = nv_rd32(dev, base + 0);
+       u32 coef = nv_rd32(dev, base + 4);
+       u32 ref = read_pll_ref(dev, base);
+       u32 clk = 0;
+       int N1, N2, M1, M2;
+
+       if (base == 0x004028 && (mast & 0x00100000)) {
+               /* wtf, appears to only disable post-divider on nva0 */
+               if (dev_priv->chipset != 0xa0)
+                       return read_clk(dev, clk_src_dom6);
+       }
+
+       N2 = (coef & 0xff000000) >> 24;
+       M2 = (coef & 0x00ff0000) >> 16;
+       N1 = (coef & 0x0000ff00) >> 8;
+       M1 = (coef & 0x000000ff);
+       if ((ctrl & 0x80000000) && M1) {
+               clk = ref * N1 / M1;
+               if ((ctrl & 0x40000100) == 0x40000000) {
+                       if (M2)
+                               clk = clk * N2 / M2;
+                       else
+                               clk = 0;
+               }
+       }
+
+       return clk;
+}
+
+static u32
+read_clk(struct drm_device *dev, enum clk_src src)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 mast = nv_rd32(dev, 0x00c040);
+       u32 P = 0;
+
+       switch (src) {
+       case clk_src_crystal:
+               return dev_priv->crystal;
+       case clk_src_href:
+               return 100000; /* PCIE reference clock */
+       case clk_src_hclk:
+               return read_clk(dev, clk_src_href) * 27778 / 10000;
+       case clk_src_hclkm3:
+               return read_clk(dev, clk_src_hclk) * 3;
+       case clk_src_hclkm3d2:
+               return read_clk(dev, clk_src_hclk) * 3 / 2;
+       case clk_src_host:
+               switch (mast & 0x30000000) {
+               case 0x00000000: return read_clk(dev, clk_src_href);
+               case 0x10000000: break;
+               case 0x20000000: /* !0x50 */
+               case 0x30000000: return read_clk(dev, clk_src_hclk);
+               }
+               break;
+       case clk_src_nvclk:
+               if (!(mast & 0x00100000))
+                       P = (nv_rd32(dev, 0x004028) & 0x00070000) >> 16;
+               switch (mast & 0x00000003) {
+               case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
+               case 0x00000001: return read_clk(dev, clk_src_dom6);
+               case 0x00000002: return read_pll(dev, 0x004020) >> P;
+               case 0x00000003: return read_pll(dev, 0x004028) >> P;
+               }
+               break;
+       case clk_src_sclk:
+               P = (nv_rd32(dev, 0x004020) & 0x00070000) >> 16;
+               switch (mast & 0x00000030) {
+               case 0x00000000:
+                       if (mast & 0x00000080)
+                               return read_clk(dev, clk_src_host) >> P;
+                       return read_clk(dev, clk_src_crystal) >> P;
+               case 0x00000010: break;
+               case 0x00000020: return read_pll(dev, 0x004028) >> P;
+               case 0x00000030: return read_pll(dev, 0x004020) >> P;
+               }
+               break;
+       case clk_src_mclk:
+               P = (nv_rd32(dev, 0x004008) & 0x00070000) >> 16;
+               if (nv_rd32(dev, 0x004008) & 0x00000200) {
+                       switch (mast & 0x0000c000) {
+                       case 0x00000000:
+                               return read_clk(dev, clk_src_crystal) >> P;
+                       case 0x00008000:
+                       case 0x0000c000:
+                               return read_clk(dev, clk_src_href) >> P;
+                       }
+               } else {
+                       return read_pll(dev, 0x004008) >> P;
+               }
+               break;
+       case clk_src_vdec:
+               P = (read_div(dev) & 0x00000700) >> 8;
+               switch (dev_priv->chipset) {
+               case 0x84:
+               case 0x86:
+               case 0x92:
+               case 0x94:
+               case 0x96:
+               case 0xa0:
+                       switch (mast & 0x00000c00) {
+                       case 0x00000000:
+                               if (dev_priv->chipset == 0xa0) /* wtf?? */
+                                       return read_clk(dev, clk_src_nvclk) >> P;
+                               return read_clk(dev, clk_src_crystal) >> P;
+                       case 0x00000400:
+                               return 0;
+                       case 0x00000800:
+                               if (mast & 0x01000000)
+                                       return read_pll(dev, 0x004028) >> P;
+                               return read_pll(dev, 0x004030) >> P;
+                       case 0x00000c00:
+                               return read_clk(dev, clk_src_nvclk) >> P;
+                       }
+                       break;
+               case 0x98:
+                       switch (mast & 0x00000c00) {
+                       case 0x00000000:
+                               return read_clk(dev, clk_src_nvclk) >> P;
+                       case 0x00000400:
+                               return 0;
+                       case 0x00000800:
+                               return read_clk(dev, clk_src_hclkm3d2) >> P;
+                       case 0x00000c00:
+                               return read_clk(dev, clk_src_mclk) >> P;
+                       }
+                       break;
+               }
+               break;
+       case clk_src_dom6:
+               switch (dev_priv->chipset) {
+               case 0x50:
+               case 0xa0:
+                       return read_pll(dev, 0x00e810) >> 2;
+               case 0x84:
+               case 0x86:
+               case 0x92:
+               case 0x94:
+               case 0x96:
+               case 0x98:
+                       P = (read_div(dev) & 0x00000007) >> 0;
+                       switch (mast & 0x0c000000) {
+                       case 0x00000000: return read_clk(dev, clk_src_href);
+                       case 0x04000000: break;
+                       case 0x08000000: return read_clk(dev, clk_src_hclk);
+                       case 0x0c000000:
+                               return read_clk(dev, clk_src_hclkm3) >> P;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       default:
+               break;
+       }
+
+       NV_DEBUG(dev, "unknown clock source %d 0x%08x\n", src, mast);
+       return 0;
+}
+
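A note on the arithmetic: the read_pll()-style helpers used above boil down to the same N/M/P formula the removed nv50_pm_clock_get() below computed explicitly (refclk * N / M, post-divided by 2^P). A minimal sketch of that calculation, assuming the usual field layout (N in bits 8-15 and M in bits 0-7 of the coefficient register, P in bits 16-18 of the control register); pll_output_khz() is an illustrative helper, not part of this patch:

	static u32
	pll_output_khz(u32 refclk, u32 ctrl, u32 coef)
	{
		u32 P = (ctrl & 0x00070000) >> 16;	/* log2 post-divider */
		u32 N = (coef & 0x0000ff00) >> 8;	/* feedback divider */
		u32 M = (coef & 0x000000ff);		/* reference divider */

		if (!M)
			return 0;			/* avoid divide-by-zero */
		return (refclk * N / M) >> P;
	}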
 int
-nv50_pm_clock_get(struct drm_device *dev, u32 id)
+nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-       struct pll_lims pll;
-       int P, N, M, ret;
-       u32 reg0, reg1;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       if (dev_priv->chipset == 0xaa ||
+           dev_priv->chipset == 0xac)
+               return 0;
 
-       ret = get_pll_limits(dev, id, &pll);
+       perflvl->core   = read_clk(dev, clk_src_nvclk);
+       perflvl->shader = read_clk(dev, clk_src_sclk);
+       perflvl->memory = read_clk(dev, clk_src_mclk);
+       if (dev_priv->chipset != 0x50) {
+               perflvl->vdec = read_clk(dev, clk_src_vdec);
+               perflvl->dom6 = read_clk(dev, clk_src_dom6);
+       }
+
+       return 0;
+}
+
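For reference, a hedged sketch of how this read-back path might be exercised; the kHz units are inferred from the old khz-based interface being replaced here rather than stated in the patch, and the snippet is illustrative only:

	static void
	dump_current_clocks(struct drm_device *dev)
	{
		struct nouveau_pm_level cur = {};

		if (nv50_pm_clocks_get(dev, &cur) == 0)
			NV_DEBUG(dev, "core %u shader %u memory %u\n",
				 cur.core, cur.shader, cur.memory);
	}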
+struct nv50_pm_state {
+       struct hwsq_ucode mclk_hwsq;
+       u32 mscript;
+
+       u32 emast;
+       u32 nctrl;
+       u32 ncoef;
+       u32 sctrl;
+       u32 scoef;
+
+       u32 amast;
+       u32 pdivs;
+};
+
+static u32
+calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
+        u32 clk, int *N1, int *M1, int *log2P)
+{
+       struct nouveau_pll_vals coef;
+       int ret;
+
+       ret = get_pll_limits(dev, reg, pll);
        if (ret)
-               return ret;
+               return 0;
+
+       pll->vco2.maxfreq = 0;
+       pll->refclk = read_pll_ref(dev, reg);
+       if (!pll->refclk)
+               return 0;
 
-       reg0 = nv_rd32(dev, pll.reg + 0);
-       reg1 = nv_rd32(dev, pll.reg + 4);
+       ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
+       if (ret == 0)
+               return 0;
 
-       if ((reg0 & 0x80000000) == 0) {
-               if (id == PLL_SHADER) {
-                       NV_DEBUG(dev, "Shader PLL is disabled. "
-                               "Shader clock is twice the core\n");
-                       ret = nv50_pm_clock_get(dev, PLL_CORE);
-                       if (ret > 0)
-                               return ret << 1;
-               } else if (id == PLL_MEMORY) {
-                       NV_DEBUG(dev, "Memory PLL is disabled. "
-                               "Memory clock is equal to the ref_clk\n");
-                       return pll.refclk;
+       *N1 = coef.N1;
+       *M1 = coef.M1;
+       *log2P = coef.log2P;
+       return ret;
+}
+
+static inline u32
+calc_div(u32 src, u32 target, int *div)
+{
+       u32 clk0 = src, clk1 = src;
+       for (*div = 0; *div <= 7; (*div)++) {
+               if (clk0 <= target) {
+                       clk1 = clk0 << (*div ? 1 : 0);
+                       break;
                }
+               clk0 >>= 1;
+       }
+
+       if (target - clk0 <= clk1 - target)
+               return clk0;
+       (*div)--;
+       return clk1;
+}
+
+static inline u32
+clk_same(u32 a, u32 b)
+{
+       return ((a / 1000) == (b / 1000));
+}
+
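calc_div() above searches for a power-of-two divider (0..7) so that src >> div lands as close to the target as possible, and clk_same() compares frequencies at whole-MHz granularity. A small worked example with assumed numbers (not taken from any real perf table):

	static void
	calc_div_example(void)
	{
		int P;
		u32 clk;

		/* 750000 kHz source, 200000 kHz target:
		 * 750000 -> 375000 -> 187500 (P = 2); 187500 is closer to
		 * the target than 375000, so 187500 is returned with P == 2.
		 */
		clk = calc_div(750000, 200000, &P);
		(void)clk;

		/* clk_same(270000, 270999) is true: only whole MHz compare */
	}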
+static int
+calc_mclk(struct drm_device *dev, u32 freq, struct hwsq_ucode *hwsq)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct pll_lims pll;
+       u32 mast = nv_rd32(dev, 0x00c040);
+       u32 ctrl = nv_rd32(dev, 0x004008);
+       u32 coef = nv_rd32(dev, 0x00400c);
+       u32 orig = ctrl;
+       u32 crtc_mask = 0;
+       int N, M, P;
+       int ret, i;
+
+       /* use pcie refclock if possible, otherwise use mpll */
+       ctrl &= ~0x81ff0200;
+       if (clk_same(freq, read_clk(dev, clk_src_href))) {
+               ctrl |= 0x00000200 | (pll.log2p_bias << 19);
+       } else {
+               ret = calc_pll(dev, 0x4008, &pll, freq, &N, &M, &P);
+               if (ret == 0)
+                       return -EINVAL;
+
+               ctrl |= 0x80000000 | (P << 22) | (P << 16);
+               ctrl |= pll.log2p_bias << 19;
+               coef  = (N << 8) | M;
+       }
+
+       mast &= ~0xc0000000; /* get MCLK_2 from HREF */
+       mast |=  0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
+
+       /* determine active crtcs */
+       for (i = 0; i < 2; i++) {
+               if (nv_rd32(dev, NV50_PDISPLAY_CRTC_C(i, CLOCK)))
+                       crtc_mask |= (1 << i);
+       }
+
+       /* build the ucode which will reclock the memory for us */
+       hwsq_init(hwsq);
+       if (crtc_mask) {
+               hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
+               hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
        }
+       if (dev_priv->chipset >= 0x92)
+               hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
+       hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
+       hwsq_op5f(hwsq, 0x00, 0x01); /* purpose unknown */
+
+       /* prepare memory controller */
+       hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
+       hwsq_wr32(hwsq, 0x1002d0, 0x00000001); /* force refresh */
+       hwsq_wr32(hwsq, 0x100210, 0x00000000); /* stop the automatic refresh */
+       hwsq_wr32(hwsq, 0x1002dc, 0x00000001); /* start self refresh mode */
 
-       P = (reg0 & 0x00070000) >> 16;
-       N = (reg1 & 0x0000ff00) >> 8;
-       M = (reg1 & 0x000000ff);
+       /* reclock memory */
+       hwsq_wr32(hwsq, 0xc040, mast);
+       hwsq_wr32(hwsq, 0x4008, orig | 0x00000200); /* bypass MPLL */
+       hwsq_wr32(hwsq, 0x400c, coef);
+       hwsq_wr32(hwsq, 0x4008, ctrl);
 
-       return ((pll.refclk * N / M) >> P);
+       /* restart memory controller */
+       hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
+       hwsq_wr32(hwsq, 0x1002dc, 0x00000000); /* stop self refresh mode */
+       hwsq_wr32(hwsq, 0x100210, 0x80000000); /* restart automatic refresh */
+       hwsq_usec(hwsq, 12); /* wait for the PLL to stabilize */
+
+       hwsq_usec(hwsq, 48); /* may be unnecessary: causes flickering */
+       hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
+       hwsq_op5f(hwsq, 0x00, 0x00); /* purpose unknown, likely undoes the (0x00, 0x01) op above */
+       if (dev_priv->chipset >= 0x92)
+               hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
+       hwsq_fini(hwsq);
+       return 0;
 }
 
 void *
-nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
-                 u32 id, int khz)
+nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-       struct nv50_pm_state *state;
-       int dummy, ret;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nv50_pm_state *info;
+       struct pll_lims pll;
+       int ret = -EINVAL;
+       int N, M, P1, P2;
+       u32 clk, out;
 
-       state = kzalloc(sizeof(*state), GFP_KERNEL);
-       if (!state)
+       if (dev_priv->chipset == 0xaa ||
+           dev_priv->chipset == 0xac)
+               return ERR_PTR(-ENODEV);
+
+       info = kmalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
                return ERR_PTR(-ENOMEM);
-       state->type = id;
-       state->perflvl = perflvl;
 
-       ret = get_pll_limits(dev, id, &state->pll);
-       if (ret < 0) {
-               kfree(state);
-               return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+       /* core: for the moment at least, always use nvpll */
+       clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
+       if (clk == 0)
+               goto error;
+
+       info->emast = 0x00000003;
+       info->nctrl = 0x80000000 | (P1 << 19) | (P1 << 16);
+       info->ncoef = (N << 8) | M;
+
+       /* shader: tie to nvclk if possible, otherwise use spll.  We have to
+        * be very careful that the shader clock is at least twice the core
+        * clock, or some chipsets will be very unhappy.  Most or all of these
+        * cases should be handled by tying to nvclk, but there may be corner
+        * cases left over.
+        */
+       if (P1-- && perflvl->shader == (perflvl->core << 1)) {
+               info->emast |= 0x00000020;
+               info->sctrl  = 0x00000000 | (P1 << 19) | (P1 << 16);
+               info->scoef  = nv_rd32(dev, 0x004024);
+       } else {
+               clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
+               if (clk == 0)
+                       goto error;
+
+               info->emast |= 0x00000030;
+               info->sctrl  = 0x80000000 | (P1 << 19) | (P1 << 16);
+               info->scoef  = (N << 8) | M;
+       }
+
+       /* memory: build hwsq ucode which we'll use to reclock memory */
+       info->mclk_hwsq.len = 0;
+       if (perflvl->memory) {
+               clk = calc_mclk(dev, perflvl->memory, &info->mclk_hwsq);
+               if (clk < 0) {
+                       ret = clk;
+                       goto error;
+               }
+
+               info->mscript = perflvl->memscript;
+       }
+
+       /* vdec: avoid modifying xpll until we know exactly how the other
+        * clock domains work; at least some of them can likely also be
+        * tied to xpll...
+        */
+       info->amast = nv_rd32(dev, 0x00c040);
+       info->pdivs = read_div(dev);
+       if (perflvl->vdec) {
+               /* see how close we can get using nvclk as a source */
+               clk = calc_div(perflvl->core, perflvl->vdec, &P1);
+
+               /* see how close we can get using xpll/hclk as a source */
+               if (dev_priv->chipset != 0x98)
+                       out = read_pll(dev, 0x004030);
+               else
+                       out = read_clk(dev, clk_src_hclkm3d2);
+               out = calc_div(out, perflvl->vdec, &P2);
+
+               /* select whichever gets us closest */
+               info->amast &= ~0x00000c00;
+               info->pdivs &= ~0x00000700;
+               if (abs((int)perflvl->vdec - clk) <=
+                   abs((int)perflvl->vdec - out)) {
+                       if (dev_priv->chipset != 0x98)
+                               info->amast |= 0x00000c00;
+                       info->pdivs |= P1 << 8;
+               } else {
+                       info->amast |= 0x00000800;
+                       info->pdivs |= P2 << 8;
+               }
+       }
+
+       /* dom6: exact purpose unknown, but we're limited to various
+        * combinations of the host clock frequency
+        */
+       if (perflvl->dom6) {
+               info->amast &= ~0x0c000000;
+               if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
+                       info->amast |= 0x00000000;
+               } else
+               if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
+                       info->amast |= 0x08000000;
+               } else {
+                       clk = read_clk(dev, clk_src_hclk) * 3;
+                       clk = calc_div(clk, perflvl->dom6, &P1);
+
+                       info->amast |= 0x0c000000;
+                       info->pdivs  = (info->pdivs & ~0x00000007) | P1;
+               }
        }
 
-       ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M,
-                           &dummy, &dummy, &state->P);
-       if (ret < 0) {
-               kfree(state);
-               return ERR_PTR(ret);
+       return info;
+error:
+       kfree(info);
+       return ERR_PTR(ret);
+}
+
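Taken together with nv50_pm_clocks_set() further below, the calling convention appears to be: pre() validates the perflvl and returns an opaque nv50_pm_state (or an ERR_PTR), and set() programs it and always frees it. A minimal usage sketch under that assumption; nv50_pm_reclock_example() is a hypothetical wrapper, not code from this patch:

	static int
	nv50_pm_reclock_example(struct drm_device *dev,
				struct nouveau_pm_level *perflvl)
	{
		void *state;

		state = nv50_pm_clocks_pre(dev, perflvl);
		if (IS_ERR(state))
			return PTR_ERR(state);

		/* set() programs the clocks and kfree()s the state */
		return nv50_pm_clocks_set(dev, state);
	}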
+static int
+prog_mclk(struct drm_device *dev, struct hwsq_ucode *hwsq)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 hwsq_data, hwsq_kick;
+       int i;
+
+       if (dev_priv->chipset < 0x90) {
+               hwsq_data = 0x001400;
+               hwsq_kick = 0x00000003;
+       } else {
+               hwsq_data = 0x080000;
+               hwsq_kick = 0x00000001;
        }
 
-       return state;
+       /* upload hwsq ucode */
+       nv_mask(dev, 0x001098, 0x00000008, 0x00000000);
+       nv_wr32(dev, 0x001304, 0x00000000);
+       for (i = 0; i < hwsq->len / 4; i++)
+               nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
+       nv_mask(dev, 0x001098, 0x00000018, 0x00000018);
+
+       /* launch, and wait for completion */
+       nv_wr32(dev, 0x00130c, hwsq_kick);
+       if (!nv_wait(dev, 0x001308, 0x00000100, 0x00000000)) {
+               NV_ERROR(dev, "hwsq ucode exec timed out\n");
+               NV_ERROR(dev, "0x001308: 0x%08x\n", nv_rd32(dev, 0x001308));
+               for (i = 0; i < hwsq->len / 4; i++) {
+                       NV_ERROR(dev, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
+                                nv_rd32(dev, 0x001400 + (i * 4)));
+               }
+
+               return -EIO;
+       }
+
+       return 0;
 }
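calc_mclk() earlier in this patch builds the sequencer ucode and prog_mclk() uploads and kicks it; a hedged sketch of the two used together (reclock_memory_example() is illustrative, and it assumes a hwsq_ucode may live on the stack just as it does embedded in nv50_pm_state):

	static int
	reclock_memory_example(struct drm_device *dev, u32 freq_khz)
	{
		struct hwsq_ucode hwsq;
		int ret;

		/* build the ucode for the requested memory clock... */
		ret = calc_mclk(dev, freq_khz, &hwsq);
		if (ret)
			return ret;

		/* ...then upload it and wait for the sequencer to finish */
		return prog_mclk(dev, &hwsq);
	}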
 
-void
-nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
+int
+nv50_pm_clocks_set(struct drm_device *dev, void *data)
 {
-       struct nv50_pm_state *state = pre_state;
-       struct nouveau_pm_level *perflvl = state->perflvl;
-       u32 reg = state->pll.reg, tmp;
-       struct bit_entry BIT_M;
-       u16 script;
-       int N = state->N;
-       int M = state->M;
-       int P = state->P;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nv50_pm_state *info = data;
+       struct bit_entry M;
+       int ret = 0;
 
-       if (state->type == PLL_MEMORY && perflvl->memscript &&
-           bit_table(dev, 'M', &BIT_M) == 0 &&
-           BIT_M.version == 1 && BIT_M.length >= 0x0b) {
-               script = ROM16(BIT_M.data[0x05]);
-               if (script)
-                       nouveau_bios_run_init_table(dev, script, NULL, -1);
-               script = ROM16(BIT_M.data[0x07]);
-               if (script)
-                       nouveau_bios_run_init_table(dev, script, NULL, -1);
-               script = ROM16(BIT_M.data[0x09]);
-               if (script)
-                       nouveau_bios_run_init_table(dev, script, NULL, -1);
+       /* halt and idle execution engines */
+       nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
+       if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010))
+               goto error;
 
-               nouveau_bios_run_init_table(dev, perflvl->memscript, NULL, -1);
+       /* memory: it is *very* important we change this first, as the
+        * ucode we build in pre() now has hardcoded 0xc040 values which
+        * must not change before we execute it, or the engine clocks may
+        * end up misconfigured.
+        */
+       if (info->mclk_hwsq.len) {
+               /* execute the relevant vbios init scripts (exact purpose unclear) */
+               if (!bit_table(dev, 'M', &M) && M.version == 1) {
+                       if (M.length >= 6)
+                               nouveau_bios_init_exec(dev, ROM16(M.data[5]));
+                       if (M.length >= 8)
+                               nouveau_bios_init_exec(dev, ROM16(M.data[7]));
+                       if (M.length >= 10)
+                               nouveau_bios_init_exec(dev, ROM16(M.data[9]));
+                       nouveau_bios_init_exec(dev, info->mscript);
+               }
+
+               ret = prog_mclk(dev, &info->mclk_hwsq);
+               if (ret)
+                       goto resume;
        }
 
-       if (state->type == PLL_MEMORY) {
-               nv_wr32(dev, 0x100210, 0);
-               nv_wr32(dev, 0x1002dc, 1);
+       /* reclock vdec/dom6 */
+       nv_mask(dev, 0x00c040, 0x00000c00, 0x00000000);
+       switch (dev_priv->chipset) {
+       case 0x92:
+       case 0x94:
+       case 0x96:
+               nv_mask(dev, 0x004800, 0x00000707, info->pdivs);
+               break;
+       default:
+               nv_mask(dev, 0x004700, 0x00000707, info->pdivs);
+               break;
        }
+       nv_mask(dev, 0x00c040, 0x0c000c00, info->amast);
 
-       tmp  = nv_rd32(dev, reg + 0) & 0xfff8ffff;
-       tmp |= 0x80000000 | (P << 16);
-       nv_wr32(dev, reg + 0, tmp);
-       nv_wr32(dev, reg + 4, (N << 8) | M);
+       /* core/shader: make sure sclk/nvclk are disconnected from their
+        * plls (nvclk to dom6, sclk to hclk), modify the plls, and
+        * reconnect sclk/nvclk to their new clock source
+        */
+       if (dev_priv->chipset < 0x92)
+               nv_mask(dev, 0x00c040, 0x001000b0, 0x00100080); /* pre-0x92 needs a different mask */
+       else
+               nv_mask(dev, 0x00c040, 0x000000b3, 0x00000081);
+       nv_mask(dev, 0x004020, 0xc03f0100, info->sctrl);
+       nv_wr32(dev, 0x004024, info->scoef);
+       nv_mask(dev, 0x004028, 0xc03f0100, info->nctrl);
+       nv_wr32(dev, 0x00402c, info->ncoef);
+       nv_mask(dev, 0x00c040, 0x00100033, info->emast);
+
+       goto resume;
+error:
+       ret = -EBUSY;
+resume:
+       nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
+       kfree(info);
+       return ret;
+}
 
-       if (state->type == PLL_MEMORY) {
-               nv_wr32(dev, 0x1002dc, 0);
-               nv_wr32(dev, 0x100210, 0x80000000);
+static int
+pwm_info(struct drm_device *dev, int *line, int *ctrl, int *indx)
+{
+       if (*line == 0x04) {
+               *ctrl = 0x00e100;
+               *line = 4;
+               *indx = 0;
+       } else
+       if (*line == 0x09) {
+               *ctrl = 0x00e100;
+               *line = 9;
+               *indx = 1;
+       } else
+       if (*line == 0x10) {
+               *ctrl = 0x00e28c;
+               *line = 0;
+               *indx = 0;
+       } else {
+               NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", *line);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+int
+nv50_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
+{
+       int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
+       if (ret)
+               return ret;
+
+       if (nv_rd32(dev, ctrl) & (1 << line)) {
+               *divs = nv_rd32(dev, 0x00e114 + (id * 8));
+               *duty = nv_rd32(dev, 0x00e118 + (id * 8));
+               return 0;
        }
 
-       kfree(state);
+       return -EINVAL;
 }
 
+int
+nv50_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
+{
+       int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
+       if (ret)
+               return ret;
+
+       nv_mask(dev, ctrl, 0x00010001 << line, 0x00000001 << line);
+       nv_wr32(dev, 0x00e114 + (id * 8), divs);
+       nv_wr32(dev, 0x00e118 + (id * 8), duty | 0x80000000);
+       return 0;
+}
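nv50_pm_pwm_get()/nv50_pm_pwm_set() only expose the raw divider and duty registers; how a fan percentage maps onto them is outside this patch. A hypothetical wrapper under the assumption that duty == divs means 100%:

	static int
	set_fan_percent(struct drm_device *dev, int gpio_line, int percent)
	{
		u32 divs, duty;
		int ret;

		/* reuse whatever divider is currently programmed */
		ret = nv50_pm_pwm_get(dev, gpio_line, &divs, &duty);
		if (ret)
			return ret;

		/* scale the duty cycle: full divider == 100% (assumed) */
		duty = divs * percent / 100;
		return nv50_pm_pwm_set(dev, gpio_line, divs, duty);
	}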
index 2633aa8..c4423ba 100644 (file)
@@ -60,6 +60,8 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
        BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
        OUT_RING  (evo, 0);
 
+       nouveau_hdmi_mode_set(encoder, NULL);
+
        nv_encoder->crtc = NULL;
        nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
 }
@@ -172,6 +174,12 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 static void
 nv50_sor_prepare(struct drm_encoder *encoder)
 {
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       nv50_sor_disconnect(encoder);
+       if (nv_encoder->dcb->type == OUTPUT_DP) {
+               /* avoid race between link training and supervisor intr */
+               nv50_display_sync(encoder->dev);
+       }
 }
 
 static void
@@ -180,8 +188,8 @@ nv50_sor_commit(struct drm_encoder *encoder)
 }
 
 static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
-                 struct drm_display_mode *adjusted_mode)
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+                 struct drm_display_mode *mode)
 {
        struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
@@ -193,24 +201,27 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
        NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
                     nv_encoder->or, nv_encoder->dcb->type, crtc->index);
+       nv_encoder->crtc = encoder->crtc;
 
        switch (nv_encoder->dcb->type) {
        case OUTPUT_TMDS:
                if (nv_encoder->dcb->sorconf.link & 1) {
-                       if (adjusted_mode->clock < 165000)
+                       if (mode->clock < 165000)
                                mode_ctl = 0x0100;
                        else
                                mode_ctl = 0x0500;
                } else
                        mode_ctl = 0x0200;
+
+               nouveau_hdmi_mode_set(encoder, mode);
                break;
        case OUTPUT_DP:
                nv_connector = nouveau_encoder_connector_get(nv_encoder);
                if (nv_connector && nv_connector->base.display_info.bpc == 6) {
-                       nv_encoder->dp.datarate = crtc->mode->clock * 18 / 8;
+                       nv_encoder->dp.datarate = mode->clock * 18 / 8;
                        mode_ctl |= 0x00020000;
                } else {
-                       nv_encoder->dp.datarate = crtc->mode->clock * 24 / 8;
+                       nv_encoder->dp.datarate = mode->clock * 24 / 8;
                        mode_ctl |= 0x00050000;
                }
 
@@ -228,10 +239,10 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        else
                mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
 
-       if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
                mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
 
-       if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
 
        nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -239,12 +250,11 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        ret = RING_SPACE(evo, 2);
        if (ret) {
                NV_ERROR(dev, "no space while connecting SOR\n");
+               nv_encoder->crtc = NULL;
                return;
        }
        BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
        OUT_RING(evo, mode_ctl);
-
-       nv_encoder->crtc = encoder->crtc;
 }
 
 static struct drm_crtc *
index 40b84f2..6f38cea 100644 (file)
@@ -48,7 +48,7 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
                        phys |= 0x60;
                else if (coverage <= 64 * 1024 * 1024)
                        phys |= 0x40;
-               else if (coverage < 128 * 1024 * 1024)
+               else if (coverage <= 128 * 1024 * 1024)
                        phys |= 0x20;
        }
 
diff --git a/drivers/gpu/drm/nouveau/nv84_bsp.c b/drivers/gpu/drm/nouveau/nv84_bsp.c
new file mode 100644 (file)
index 0000000..7487573
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+/*XXX: This stub is currently used on NV98+ also; as soon as it becomes
+ *     more than just an enable/disable stub, it needs to be split out to
+ *     nv98_bsp.c...
+ */
+
+struct nv84_bsp_engine {
+       struct nouveau_exec_engine base;
+};
+
+static int
+nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+       if (!(nv_rd32(dev, 0x000200) & 0x00008000))
+               return 0;
+
+       nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
+       return 0;
+}
+
+static int
+nv84_bsp_init(struct drm_device *dev, int engine)
+{
+       nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
+       nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
+       return 0;
+}
+
+static void
+nv84_bsp_destroy(struct drm_device *dev, int engine)
+{
+       struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
+
+       NVOBJ_ENGINE_DEL(dev, BSP);
+
+       kfree(pbsp);
+}
+
+int
+nv84_bsp_create(struct drm_device *dev)
+{
+       struct nv84_bsp_engine *pbsp;
+
+       pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
+       if (!pbsp)
+               return -ENOMEM;
+
+       pbsp->base.destroy = nv84_bsp_destroy;
+       pbsp->base.init = nv84_bsp_init;
+       pbsp->base.fini = nv84_bsp_fini;
+
+       NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
+       return 0;
+}
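The init/fini hooks of this stub (and of the identical VP, CRYPT and PPP stubs that follow) only toggle the engine's enable bit in PMC register 0x000200. A minimal sketch of that shared pattern; pmc_engine_reset() is a hypothetical helper, and 0x00008000 is the BSP bit used above:

	static void
	pmc_engine_reset(struct drm_device *dev, u32 pmc_bit)
	{
		/* clearing then setting the bit resets the engine */
		nv_mask(dev, 0x000200, pmc_bit, 0x00000000);
		nv_mask(dev, 0x000200, pmc_bit, pmc_bit);
	}

	/* nv84_bsp_init() is equivalent to pmc_engine_reset(dev, 0x00008000) */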
diff --git a/drivers/gpu/drm/nouveau/nv84_vp.c b/drivers/gpu/drm/nouveau/nv84_vp.c
new file mode 100644 (file)
index 0000000..6570d30
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+/*XXX: This stub is currently used on NV98+ also; as soon as it becomes
+ *     more than just an enable/disable stub, it needs to be split out to
+ *     nv98_vp.c...
+ */
+
+struct nv84_vp_engine {
+       struct nouveau_exec_engine base;
+};
+
+static int
+nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+       if (!(nv_rd32(dev, 0x000200) & 0x00020000))
+               return 0;
+
+       nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
+       return 0;
+}
+
+static int
+nv84_vp_init(struct drm_device *dev, int engine)
+{
+       nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
+       nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
+       return 0;
+}
+
+static void
+nv84_vp_destroy(struct drm_device *dev, int engine)
+{
+       struct nv84_vp_engine *pvp = nv_engine(dev, engine);
+
+       NVOBJ_ENGINE_DEL(dev, VP);
+
+       kfree(pvp);
+}
+
+int
+nv84_vp_create(struct drm_device *dev)
+{
+       struct nv84_vp_engine *pvp;
+
+       pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
+       if (!pvp)
+               return -ENOMEM;
+
+       pvp->base.destroy = nv84_vp_destroy;
+       pvp->base.init = nv84_vp_init;
+       pvp->base.fini = nv84_vp_fini;
+
+       NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
new file mode 100644 (file)
index 0000000..db94ff0
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+struct nv98_crypt_engine {
+       struct nouveau_exec_engine base;
+};
+
+static int
+nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+{
+       if (!(nv_rd32(dev, 0x000200) & 0x00004000))
+               return 0;
+
+       nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+       return 0;
+}
+
+static int
+nv98_crypt_init(struct drm_device *dev, int engine)
+{
+       nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+       nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+       return 0;
+}
+
+static void
+nv98_crypt_destroy(struct drm_device *dev, int engine)
+{
+       struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
+
+       NVOBJ_ENGINE_DEL(dev, CRYPT);
+
+       kfree(pcrypt);
+}
+
+int
+nv98_crypt_create(struct drm_device *dev)
+{
+       struct nv98_crypt_engine *pcrypt;
+
+       pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
+       if (!pcrypt)
+               return -ENOMEM;
+
+       pcrypt->base.destroy = nv98_crypt_destroy;
+       pcrypt->base.init = nv98_crypt_init;
+       pcrypt->base.fini = nv98_crypt_fini;
+
+       NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv98_ppp.c b/drivers/gpu/drm/nouveau/nv98_ppp.c
new file mode 100644 (file)
index 0000000..a987dd6
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+struct nv98_ppp_engine {
+       struct nouveau_exec_engine base;
+};
+
+static int
+nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+       if (!(nv_rd32(dev, 0x000200) & 0x00000002))
+               return 0;
+
+       nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+       return 0;
+}
+
+static int
+nv98_ppp_init(struct drm_device *dev, int engine)
+{
+       nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+       nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
+       return 0;
+}
+
+static void
+nv98_ppp_destroy(struct drm_device *dev, int engine)
+{
+       struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
+
+       NVOBJ_ENGINE_DEL(dev, PPP);
+
+       kfree(pppp);
+}
+
+int
+nv98_ppp_create(struct drm_device *dev)
+{
+       struct nv98_ppp_engine *pppp;
+
+       pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
+       if (!pppp)
+               return -ENOMEM;
+
+       pppp->base.destroy = nv98_ppp_destroy;
+       pppp->base.init = nv98_ppp_init;
+       pppp->base.fini = nv98_ppp_fini;
+
+       NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
+       return 0;
+}
index eaf35f8..abc3662 100644 (file)
@@ -31,8 +31,9 @@
  */
 
 ifdef(`NVA3',
-.section nva3_pcopy_data,
-.section nvc0_pcopy_data
+.section #nva3_pcopy_data
+,
+.section #nvc0_pcopy_data
 )
 
 ctx_object:                   .b32 0
@@ -42,7 +43,7 @@ ctx_dma_query:                .b32 0
 ctx_dma_src:                  .b32 0
 ctx_dma_dst:                  .b32 0
 ,)
-.equ ctx_dma_count 3
+.equ #ctx_dma_count 3
 ctx_query_address_high:       .b32 0
 ctx_query_address_low:        .b32 0
 ctx_query_counter:            .b32 0
@@ -78,64 +79,65 @@ ctx_ycnt:                     .b32 0
 dispatch_table:
 // mthd 0x0000, NAME
 .b16 0x000 1
-.b32 ctx_object                     ~0xffffffff
+.b32 #ctx_object                     ~0xffffffff
 // mthd 0x0100, NOP
 .b16 0x040 1
-.b32 0x00010000 + cmd_nop           ~0xffffffff
+.b32 0x00010000 + #cmd_nop           ~0xffffffff
 // mthd 0x0140, PM_TRIGGER
 .b16 0x050 1
-.b32 0x00010000 + cmd_pm_trigger    ~0xffffffff
+.b32 0x00010000 + #cmd_pm_trigger    ~0xffffffff
 ifdef(`NVA3', `
 // mthd 0x0180-0x018c, DMA_
-.b16 0x060 ctx_dma_count
+.b16 0x060 #ctx_dma_count
 dispatch_dma:
-.b32 0x00010000 + cmd_dma           ~0xffffffff
-.b32 0x00010000 + cmd_dma           ~0xffffffff
-.b32 0x00010000 + cmd_dma           ~0xffffffff
+.b32 0x00010000 + #cmd_dma           ~0xffffffff
+.b32 0x00010000 + #cmd_dma           ~0xffffffff
+.b32 0x00010000 + #cmd_dma           ~0xffffffff
 ',)
 // mthd 0x0200-0x0218, SRC_TILE
 .b16 0x80 7
-.b32 ctx_src_tile_mode              ~0x00000fff
-.b32 ctx_src_xsize                  ~0x0007ffff
-.b32 ctx_src_ysize                  ~0x00001fff
-.b32 ctx_src_zsize                  ~0x000007ff
-.b32 ctx_src_zoff                   ~0x00000fff
-.b32 ctx_src_xoff                   ~0x0007ffff
-.b32 ctx_src_yoff                   ~0x00001fff
+.b32 #ctx_src_tile_mode              ~0x00000fff
+.b32 #ctx_src_xsize                  ~0x0007ffff
+.b32 #ctx_src_ysize                  ~0x00001fff
+.b32 #ctx_src_zsize                  ~0x000007ff
+.b32 #ctx_src_zoff                   ~0x00000fff
+.b32 #ctx_src_xoff                   ~0x0007ffff
+.b32 #ctx_src_yoff                   ~0x00001fff
 // mthd 0x0220-0x0238, DST_TILE
 .b16 0x88 7
-.b32 ctx_dst_tile_mode              ~0x00000fff
-.b32 ctx_dst_xsize                  ~0x0007ffff
-.b32 ctx_dst_ysize                  ~0x00001fff
-.b32 ctx_dst_zsize                  ~0x000007ff
-.b32 ctx_dst_zoff                   ~0x00000fff
-.b32 ctx_dst_xoff                   ~0x0007ffff
-.b32 ctx_dst_yoff                   ~0x00001fff
+.b32 #ctx_dst_tile_mode              ~0x00000fff
+.b32 #ctx_dst_xsize                  ~0x0007ffff
+.b32 #ctx_dst_ysize                  ~0x00001fff
+.b32 #ctx_dst_zsize                  ~0x000007ff
+.b32 #ctx_dst_zoff                   ~0x00000fff
+.b32 #ctx_dst_xoff                   ~0x0007ffff
+.b32 #ctx_dst_yoff                   ~0x00001fff
 // mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
 .b16 0xc0 2
-.b32 0x00010000 + cmd_exec          ~0xffffffff
-.b32 0x00010000 + cmd_wrcache_flush ~0xffffffff
+.b32 0x00010000 + #cmd_exec          ~0xffffffff
+.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff
 // mthd 0x030c-0x0340, various stuff
 .b16 0xc3 14
-.b32 ctx_src_address_high           ~0x000000ff
-.b32 ctx_src_address_low            ~0xfffffff0
-.b32 ctx_dst_address_high           ~0x000000ff
-.b32 ctx_dst_address_low            ~0xfffffff0
-.b32 ctx_src_pitch                  ~0x0007ffff
-.b32 ctx_dst_pitch                  ~0x0007ffff
-.b32 ctx_xcnt                       ~0x0000ffff
-.b32 ctx_ycnt                       ~0x00001fff
-.b32 ctx_format                     ~0x0333ffff
-.b32 ctx_swz_const0                 ~0xffffffff
-.b32 ctx_swz_const1                 ~0xffffffff
-.b32 ctx_query_address_high         ~0x000000ff
-.b32 ctx_query_address_low          ~0xffffffff
-.b32 ctx_query_counter              ~0xffffffff
+.b32 #ctx_src_address_high           ~0x000000ff
+.b32 #ctx_src_address_low            ~0xfffffff0
+.b32 #ctx_dst_address_high           ~0x000000ff
+.b32 #ctx_dst_address_low            ~0xfffffff0
+.b32 #ctx_src_pitch                  ~0x0007ffff
+.b32 #ctx_dst_pitch                  ~0x0007ffff
+.b32 #ctx_xcnt                       ~0x0000ffff
+.b32 #ctx_ycnt                       ~0x00001fff
+.b32 #ctx_format                     ~0x0333ffff
+.b32 #ctx_swz_const0                 ~0xffffffff
+.b32 #ctx_swz_const1                 ~0xffffffff
+.b32 #ctx_query_address_high         ~0x000000ff
+.b32 #ctx_query_address_low          ~0xffffffff
+.b32 #ctx_query_counter              ~0xffffffff
 .b16 0x800 0
 
 ifdef(`NVA3',
-.section nva3_pcopy_code,
-.section nvc0_pcopy_code
+.section #nva3_pcopy_code
+,
+.section #nvc0_pcopy_code
 )
 
 main:
@@ -143,12 +145,12 @@ main:
    mov $sp $r0
 
    // setup i0 handler and route fifo and ctxswitch to it
-   mov $r1 ih
+   mov $r1 #ih
    mov $iv0 $r1
    mov $r1 0x400
    movw $r2 0xfff3
    sethi $r2 0
-   iowr I[$r2 + 0x300] $r2
+   iowr I[$r1 + 0x300] $r2
 
    // enable interrupts
    or $r2 0xc
@@ -164,19 +166,19 @@ main:
    bset $flags $p0
    spin:
       sleep $p0
-      bra spin
+      bra #spin
 
 // i0 handler
 ih:
    iord $r1 I[$r0 + 0x200]
 
    and $r2 $r1 0x00000008
-   bra e ih_no_chsw
-      call chsw
+   bra e #ih_no_chsw
+      call #chsw
    ih_no_chsw:
    and $r2 $r1 0x00000004
-   bra e ih_no_cmd
-      call dispatch
+   bra e #ih_no_cmd
+      call #dispatch
 
    ih_no_cmd:
    and $r1 $r1 0x0000000c
@@ -235,9 +237,9 @@ ifdef(`NVA3', `
    sethi $r4 0x60000
 
    // swap!
-   bra $p1 swctx_load
+   bra $p1 #swctx_load
       xdst $r0 $r4
-      bra swctx_done
+      bra #swctx_done
    swctx_load:
       xdld $r0 $r4
    swctx_done:
@@ -251,9 +253,9 @@ chsw:
 
    // if it's active, unload it and return
    xbit $r15 $r3 0x1e
-   bra e chsw_no_unload
+   bra e #chsw_no_unload
       bclr $flags $p1
-      call swctx
+      call #swctx
       bclr $r3 0x1e
       iowr I[$r2] $r3
       mov $r4 1
@@ -266,20 +268,20 @@ chsw:
 
    // is there a channel waiting to be loaded?
    xbit $r13 $r3 0x1e
-   bra e chsw_finish_load
+   bra e #chsw_finish_load
       bset $flags $p1
-      call swctx
+      call #swctx
 ifdef(`NVA3',
       // load dma objects back into TARGET regs
-      mov $r5 ctx_dma
-      mov $r6 ctx_dma_count
+      mov $r5 #ctx_dma
+      mov $r6 #ctx_dma_count
       chsw_load_ctx_dma:
          ld b32 $r7 D[$r5 + $r6 * 4]
          add b32 $r8 $r6 0x180
          shl b32 $r8 8
          iowr I[$r8] $r7
          sub b32 $r6 1
-         bra nc chsw_load_ctx_dma
+         bra nc #chsw_load_ctx_dma
 ,)
 
    chsw_finish_load:
@@ -297,7 +299,7 @@ dispatch:
    shl b32 $r2 0x10
 
    // lookup method in the dispatch table, ILLEGAL_MTHD if not found
-   mov $r5 dispatch_table
+   mov $r5 #dispatch_table
    clear b32 $r6
    clear b32 $r7
    dispatch_loop:
@@ -305,14 +307,14 @@ dispatch:
       ld b16 $r7 D[$r5 + 2]
       add b32 $r5 4
       cmpu b32 $r4 $r6
-      bra c dispatch_illegal_mthd
+      bra c #dispatch_illegal_mthd
       add b32 $r7 $r6
       cmpu b32 $r4 $r7
-      bra c dispatch_valid_mthd
+      bra c #dispatch_valid_mthd
       sub b32 $r7 $r6
       shl b32 $r7 3
       add b32 $r5 $r7
-      bra dispatch_loop
+      bra #dispatch_loop
 
    // ensure no bits set in reserved fields, INVALID_BITFIELD
    dispatch_valid_mthd:
@@ -322,20 +324,20 @@ dispatch:
    ld b32 $r5 D[$r4 + 4]
    and $r5 $r3
    cmpu b32 $r5 0
-   bra ne dispatch_invalid_bitfield
+   bra ne #dispatch_invalid_bitfield
 
    // depending on dispatch flags: execute method, or save data as state
    ld b16 $r5 D[$r4 + 0]
    ld b16 $r6 D[$r4 + 2]
    cmpu b32 $r6 0
-   bra ne dispatch_cmd
+   bra ne #dispatch_cmd
       st b32 D[$r5] $r3
-      bra dispatch_done
+      bra #dispatch_done
    dispatch_cmd:
       bclr $flags $p1
       call $r5
-      bra $p1 dispatch_error
-      bra dispatch_done
+      bra $p1 #dispatch_error
+      bra #dispatch_done
 
    dispatch_invalid_bitfield:
    or $r2 2
@@ -353,7 +355,7 @@ dispatch:
       iord $r2 I[$r0 + 0x200]
       and $r2 0x40
       cmpu b32 $r2 0
-      bra ne hostirq_wait
+      bra ne #hostirq_wait
 
    dispatch_done:
    mov $r2 0x1d00
@@ -409,10 +411,10 @@ ifdef(`NVA3',
 //       $r2: hostirq state
 //       $r3: data
 cmd_dma:
-   sub b32 $r4 dispatch_dma
+   sub b32 $r4 #dispatch_dma
    shr b32 $r4 1
    bset $r3 0x1e
-   st b32 D[$r4 + ctx_dma] $r3
+   st b32 D[$r4 + #ctx_dma] $r3
    add b32 $r4 0x600
    shl b32 $r4 6
    iowr I[$r4] $r3
@@ -430,7 +432,7 @@ cmd_exec_set_format:
    st b32 D[$sp + 0x0c] $r0
 
    // extract cpp, src_ncomp and dst_ncomp from FORMAT
-   ld b32 $r4 D[$r0 + ctx_format]
+   ld b32 $r4 D[$r0 + #ctx_format]
    extr $r5 $r4 16:17
    add b32 $r5 1
    extr $r6 $r4 20:21
@@ -448,22 +450,22 @@ cmd_exec_set_format:
       clear b32 $r11
       bpc_loop:
          cmpu b8 $r10 4
-         bra nc cmp_c0
+         bra nc #cmp_c0
             mulu $r12 $r10 $r5
             add b32 $r12 $r11
             bset $flags $p2
-            bra bpc_next
+            bra #bpc_next
          cmp_c0:
-         bra ne cmp_c1
+         bra ne #cmp_c1
             mov $r12 0x10
             add b32 $r12 $r11
-            bra bpc_next
+            bra #bpc_next
          cmp_c1:
          cmpu b8 $r10 6
-         bra nc cmp_zero
+         bra nc #cmp_zero
             mov $r12 0x14
             add b32 $r12 $r11
-            bra bpc_next
+            bra #bpc_next
          cmp_zero:
             mov $r12 0x80
          bpc_next:
@@ -471,22 +473,22 @@ cmd_exec_set_format:
          add b32 $r8 1
          add b32 $r11 1
          cmpu b32 $r11 $r5
-         bra c bpc_loop
+         bra c #bpc_loop
       add b32 $r9 1
       cmpu b32 $r9 $r7
-      bra c ncomp_loop
+      bra c #ncomp_loop
 
    // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
    mulu $r6 $r5
-   st b32 D[$r0 + ctx_src_cpp] $r6
-   ld b32 $r8 D[$r0 + ctx_xcnt]
+   st b32 D[$r0 + #ctx_src_cpp] $r6
+   ld b32 $r8 D[$r0 + #ctx_xcnt]
    mulu $r6 $r8
-   bra $p2 dst_xcnt
+   bra $p2 #dst_xcnt
    clear b32 $r6
 
    dst_xcnt:
    mulu $r7 $r5
-   st b32 D[$r0 + ctx_dst_cpp] $r7
+   st b32 D[$r0 + #ctx_dst_cpp] $r7
    mulu $r7 $r8
 
    mov $r5 0x810
@@ -494,10 +496,10 @@ cmd_exec_set_format:
    iowr I[$r5 + 0x000] $r6
    iowr I[$r5 + 0x100] $r7
    add b32 $r5 0x800
-   ld b32 $r6 D[$r0 + ctx_dst_cpp]
+   ld b32 $r6 D[$r0 + #ctx_dst_cpp]
    sub b32 $r6 1
    shl b32 $r6 8
-   ld b32 $r7 D[$r0 + ctx_src_cpp]
+   ld b32 $r7 D[$r0 + #ctx_src_cpp]
    sub b32 $r7 1
    or $r6 $r7
    iowr I[$r5 + 0x000] $r6
@@ -511,9 +513,9 @@ cmd_exec_set_format:
    ld b32 $r6 D[$sp + 0x0c]
    iowr I[$r5 + 0x300] $r6
    add b32 $r5 0x400
-   ld b32 $r6 D[$r0 + ctx_swz_const0]
+   ld b32 $r6 D[$r0 + #ctx_swz_const0]
    iowr I[$r5 + 0x000] $r6
-   ld b32 $r6 D[$r0 + ctx_swz_const1]
+   ld b32 $r6 D[$r0 + #ctx_swz_const1]
    iowr I[$r5 + 0x100] $r6
    add $sp 0x10
    ret
@@ -543,7 +545,7 @@ cmd_exec_set_format:
 //
 cmd_exec_set_surface_tiled:
    // translate TILE_MODE into Tp, Th, Td shift values
-   ld b32 $r7 D[$r5 + ctx_src_tile_mode]
+   ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
    extr $r9 $r7 8:11
    extr $r8 $r7 4:7
 ifdef(`NVA3',
@@ -553,9 +555,9 @@ ifdef(`NVA3',
 )
    extr $r7 $r7 0:3
    cmp b32 $r7 0xe
-   bra ne xtile64
+   bra ne #xtile64
    mov $r7 4
-   bra xtileok
+   bra #xtileok
    xtile64:
    xbit $r7 $flags $p2
    add b32 $r7 17
@@ -565,8 +567,8 @@ ifdef(`NVA3',
 
    // Op = (x * cpp) & ((1 << Tp) - 1)
    // Tx = (x * cpp) >> Tp
-   ld b32 $r10 D[$r5 + ctx_src_xoff]
-   ld b32 $r11 D[$r5 + ctx_src_cpp]
+   ld b32 $r10 D[$r5 + #ctx_src_xoff]
+   ld b32 $r11 D[$r5 + #ctx_src_cpp]
    mulu $r10 $r11
    mov $r11 1
    shl b32 $r11 $r7
@@ -576,7 +578,7 @@ ifdef(`NVA3',
 
    // Tyo = y & ((1 << Th) - 1)
    // Ty  = y >> Th
-   ld b32 $r13 D[$r5 + ctx_src_yoff]
+   ld b32 $r13 D[$r5 + #ctx_src_yoff]
    mov $r14 1
    shl b32 $r14 $r8
    sub b32 $r14 1
@@ -598,8 +600,8 @@ ifdef(`NVA3',
    add b32 $r12 $r11
 
    // nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp)
-   ld b32 $r15 D[$r5 + ctx_src_xsize]
-   ld b32 $r11 D[$r5 + ctx_src_cpp]
+   ld b32 $r15 D[$r5 + #ctx_src_xsize]
+   ld b32 $r11 D[$r5 + #ctx_src_cpp]
    mulu $r15 $r11
    mov $r11 1
    shl b32 $r11 $r7
@@ -609,7 +611,7 @@ ifdef(`NVA3',
    push $r15
 
    // nTy = (h + ((1 << Th) - 1)) >> Th
-   ld b32 $r15 D[$r5 + ctx_src_ysize]
+   ld b32 $r15 D[$r5 + #ctx_src_ysize]
    mov $r11 1
    shl b32 $r11 $r8
    sub b32 $r11 1
@@ -629,7 +631,7 @@ ifdef(`NVA3',
    // Tz  = z >> Td
    // Op += Tzo << Tys
    // Ts  = Tys + Td
-   ld b32 $r8 D[$r5 + ctx_src_zoff]
+   ld b32 $r8 D[$r5 + #ctx_src_zoff]
    mov $r14 1
    shl b32 $r14 $r9
    sub b32 $r14 1
@@ -656,8 +658,8 @@ ifdef(`NVA3',
 
    // SRC_ADDRESS_LOW   = (Ot + Op) & 0xffffffff
    // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
-   ld b32 $r7 D[$r5 + ctx_src_address_low]
-   ld b32 $r8 D[$r5 + ctx_src_address_high]
+   ld b32 $r7 D[$r5 + #ctx_src_address_low]
+   ld b32 $r8 D[$r5 + #ctx_src_address_high]
    add b32 $r10 $r12
    add b32 $r7 $r10
    adc b32 $r8 0
@@ -677,14 +679,14 @@ cmd_exec_set_surface_linear:
    xbit $r6 $flags $p2
    add b32 $r6 0x202
    shl b32 $r6 8
-   ld b32 $r7 D[$r5 + ctx_src_address_low]
+   ld b32 $r7 D[$r5 + #ctx_src_address_low]
    iowr I[$r6 + 0x000] $r7
    add b32 $r6 0x400
-   ld b32 $r7 D[$r5 + ctx_src_address_high]
+   ld b32 $r7 D[$r5 + #ctx_src_address_high]
    shl b32 $r7 16
    iowr I[$r6 + 0x000] $r7
    add b32 $r6 0x400
-   ld b32 $r7 D[$r5 + ctx_src_pitch]
+   ld b32 $r7 D[$r5 + #ctx_src_pitch]
    iowr I[$r6 + 0x000] $r7
    ret
 
@@ -697,7 +699,7 @@ cmd_exec_wait:
    loop:
       iord $r1 I[$r0]
       and $r1 1
-      bra ne loop
+      bra ne #loop
    pop $r1
    pop $r0
    ret
@@ -705,18 +707,18 @@ cmd_exec_wait:
 cmd_exec_query:
    // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
    xbit $r4 $r3 13
-   bra ne query_counter
-      call cmd_exec_wait
+   bra ne #query_counter
+      call #cmd_exec_wait
       mov $r4 0x80c
       shl b32 $r4 6
-      ld b32 $r5 D[$r0 + ctx_query_address_low]
+      ld b32 $r5 D[$r0 + #ctx_query_address_low]
       add b32 $r5 4
       iowr I[$r4 + 0x000] $r5
       iowr I[$r4 + 0x100] $r0
       mov $r5 0xc
       iowr I[$r4 + 0x200] $r5
       add b32 $r4 0x400
-      ld b32 $r5 D[$r0 + ctx_query_address_high]
+      ld b32 $r5 D[$r0 + #ctx_query_address_high]
       shl b32 $r5 16
       iowr I[$r4 + 0x000] $r5
       add b32 $r4 0x500
@@ -741,16 +743,16 @@ cmd_exec_query:
 
    // write COUNTER
    query_counter:
-   call cmd_exec_wait
+   call #cmd_exec_wait
    mov $r4 0x80c
    shl b32 $r4 6
-   ld b32 $r5 D[$r0 + ctx_query_address_low]
+   ld b32 $r5 D[$r0 + #ctx_query_address_low]
    iowr I[$r4 + 0x000] $r5
    iowr I[$r4 + 0x100] $r0
    mov $r5 0x4
    iowr I[$r4 + 0x200] $r5
    add b32 $r4 0x400
-   ld b32 $r5 D[$r0 + ctx_query_address_high]
+   ld b32 $r5 D[$r0 + #ctx_query_address_high]
    shl b32 $r5 16
    iowr I[$r4 + 0x000] $r5
    add b32 $r4 0x500
@@ -759,7 +761,7 @@ cmd_exec_query:
    mov $r5 0x00001110
    sethi $r5 0x13120000
    iowr I[$r4 + 0x100] $r5
-   ld b32 $r5 D[$r0 + ctx_query_counter]
+   ld b32 $r5 D[$r0 + #ctx_query_counter]
    add b32 $r4 0x500
    iowr I[$r4 + 0x000] $r5
    mov $r5 0x00002601
@@ -787,22 +789,22 @@ cmd_exec_query:
 //       $r2: hostirq state
 //       $r3: data
 cmd_exec:
-   call cmd_exec_wait
+   call #cmd_exec_wait
 
    // if format requested, call function to calculate it, otherwise
    // fill in cpp/xcnt for both surfaces as if (cpp == 1)
    xbit $r15 $r3 0
-   bra e cmd_exec_no_format
-      call cmd_exec_set_format
+   bra e #cmd_exec_no_format
+      call #cmd_exec_set_format
       mov $r4 0x200
-      bra cmd_exec_init_src_surface
+      bra #cmd_exec_init_src_surface
    cmd_exec_no_format:
       mov $r6 0x810
       shl b32 $r6 6
       mov $r7 1
-      st b32 D[$r0 + ctx_src_cpp] $r7
-      st b32 D[$r0 + ctx_dst_cpp] $r7
-      ld b32 $r7 D[$r0 + ctx_xcnt]
+      st b32 D[$r0 + #ctx_src_cpp] $r7
+      st b32 D[$r0 + #ctx_dst_cpp] $r7
+      ld b32 $r7 D[$r0 + #ctx_xcnt]
       iowr I[$r6 + 0x000] $r7
       iowr I[$r6 + 0x100] $r7
       clear b32 $r4
@@ -811,28 +813,28 @@ cmd_exec:
    bclr $flags $p2
    clear b32 $r5
    xbit $r15 $r3 4
-   bra e src_tiled
-      call cmd_exec_set_surface_linear
-      bra cmd_exec_init_dst_surface
+   bra e #src_tiled
+      call #cmd_exec_set_surface_linear
+      bra #cmd_exec_init_dst_surface
    src_tiled:
-      call cmd_exec_set_surface_tiled
+      call #cmd_exec_set_surface_tiled
       bset $r4 7
 
    cmd_exec_init_dst_surface:
    bset $flags $p2
-   mov $r5 ctx_dst_address_high - ctx_src_address_high
+   mov $r5 #ctx_dst_address_high - #ctx_src_address_high
    xbit $r15 $r3 8
-   bra e dst_tiled
-      call cmd_exec_set_surface_linear
-      bra cmd_exec_kick
+   bra e #dst_tiled
+      call #cmd_exec_set_surface_linear
+      bra #cmd_exec_kick
    dst_tiled:
-      call cmd_exec_set_surface_tiled
+      call #cmd_exec_set_surface_tiled
       bset $r4 8
 
    cmd_exec_kick:
    mov $r5 0x800
    shl b32 $r5 6
-   ld b32 $r6 D[$r0 + ctx_ycnt]
+   ld b32 $r6 D[$r0 + #ctx_ycnt]
    iowr I[$r5 + 0x100] $r6
    mov $r6 0x0041
    // SRC_TARGET = 1, DST_TARGET = 2
@@ -842,8 +844,8 @@ cmd_exec:
 
    // if requested, queue up a QUERY write after the copy has completed
    xbit $r15 $r3 12
-   bra e cmd_exec_done
-      call cmd_exec_query
+   bra e #cmd_exec_done
+      call #cmd_exec_query
 
    cmd_exec_done:
    ret
index 2731de2..1f33fbd 100644 (file)
@@ -152,7 +152,7 @@ uint32_t nva3_pcopy_code[] = {
        0xf10010fe,
        0xf1040017,
        0xf0fff327,
-       0x22d00023,
+       0x12d00023,
        0x0c25f0c0,
        0xf40012d0,
        0x17f11031,
index 618c144..9e636e6 100644 (file)
@@ -287,12 +287,13 @@ nva3_pm_grcp_idle(void *data)
        return false;
 }
 
-void
+int
 nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nva3_pm_state *info = pre_state;
        unsigned long flags;
+       int ret = -EAGAIN;
 
        /* prevent any new grctx switches from starting */
        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
@@ -328,6 +329,8 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
                nv_wr32(dev, 0x100210, 0x80000000);
        }
 
+       ret = 0;
+
 cleanup:
        /* unfreeze PFIFO */
        nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
@@ -339,4 +342,5 @@ cleanup:
                nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
        kfree(info);
+       return ret;
 }
index 4199038..a8d1745 100644 (file)
@@ -145,7 +145,7 @@ uint32_t nvc0_pcopy_code[] = {
        0xf10010fe,
        0xf1040017,
        0xf0fff327,
-       0x22d00023,
+       0x12d00023,
        0x0c25f0c0,
        0xf40012d0,
        0x17f11031,
index ecfafd7..8ee3963 100644 (file)
@@ -875,14 +875,16 @@ nvc0_graph_create(struct drm_device *dev)
        case 0xcf: /* 4/0/0/0, 3 */
                priv->magic_not_rop_nr = 0x03;
                break;
+       case 0xd9: /* 1/0/0/0, 1 */
+               priv->magic_not_rop_nr = 0x01;
+               break;
        }
 
        if (!priv->magic_not_rop_nr) {
                NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
                         priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
                         priv->tp_nr[3], priv->rop_nr);
-               /* use 0xc3's values... */
-               priv->magic_not_rop_nr = 0x03;
+               priv->magic_not_rop_nr = 0x00;
        }
 
        NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
index 2a4b6dc..e6b2288 100644 (file)
@@ -71,9 +71,9 @@ queue_put:
        ld b32 $r9 D[$r13 + 0x4]        // PUT
        xor $r8 8
        cmpu b32 $r8 $r9
-       bra ne queue_put_next
+       bra ne #queue_put_next
                mov $r15 E_CMD_OVERFLOW
-               call error
+               call #error
                ret
 
        // store cmd/data on queue
@@ -104,7 +104,7 @@ queue_get:
        ld b32 $r8 D[$r13 + 0x0]        // GET
        ld b32 $r9 D[$r13 + 0x4]        // PUT
        cmpu b32 $r8 $r9
-       bra e queue_get_done
+       bra e #queue_get_done
                // fetch first cmd/data pair
                and $r9 $r8 7
                shl b32 $r9 3
@@ -135,9 +135,9 @@ nv_rd32:
        nv_rd32_wait:
                iord $r12 I[$r11 + 0x000]
                xbit $r12 $r12 31
-               bra ne nv_rd32_wait
+               bra ne #nv_rd32_wait
        mov $r10 6                      // DONE_MMIO_RD
-       call wait_doneo
+       call #wait_doneo
        iord $r15 I[$r11 + 0x100]       // MMIO_RDVAL
        ret
 
@@ -157,7 +157,7 @@ nv_wr32:
        nv_wr32_wait:
                iord $r12 I[$r11 + 0x000]
                xbit $r12 $r12 31
-               bra ne nv_wr32_wait
+               bra ne #nv_wr32_wait
        ret
 
 // (re)set watchdog timer
@@ -193,7 +193,7 @@ $1:
                shl b32 $r8 6
                iord $r8 I[$r8 + 0x000] // DONE
                xbit $r8 $r8 $r10
-               bra $2 wait_done_$1
+               bra $2 #wait_done_$1
        trace_clr(T_WAIT)
        ret
 ')
@@ -216,7 +216,7 @@ mmctx_size:
                add b32 $r9 $r8
                add b32 $r14 4
                cmpu b32 $r14 $r15
-               bra ne nv_mmctx_size_loop
+               bra ne #nv_mmctx_size_loop
        mov b32 $r15 $r9
        ret
 
@@ -238,12 +238,12 @@ mmctx_xfer:
        shl b32 $r8 6
        clear b32 $r9
        or $r11 $r11
-       bra e mmctx_base_disabled
+       bra e #mmctx_base_disabled
                iowr I[$r8 + 0x000] $r11        // MMCTX_BASE
                bset $r9 0                      // BASE_EN
        mmctx_base_disabled:
        or $r14 $r14
-       bra e mmctx_multi_disabled
+       bra e #mmctx_multi_disabled
                iowr I[$r8 + 0x200] $r14        // MMCTX_MULTI_STRIDE
                iowr I[$r8 + 0x300] $r15        // MMCTX_MULTI_MASK
                bset $r9 1                      // MULTI_EN
@@ -264,7 +264,7 @@ mmctx_xfer:
                mmctx_wait_free:
                        iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
                        and $r14 0x1f
-                       bra e mmctx_wait_free
+                       bra e #mmctx_wait_free
 
                // queue up an entry
                ld b32 $r14 D[$r12]
@@ -272,19 +272,19 @@ mmctx_xfer:
                iowr I[$r8 + 0x300] $r14
                add b32 $r12 4
                cmpu b32 $r12 $r13
-               bra ne mmctx_exec_loop
+               bra ne #mmctx_exec_loop
 
        xbit $r11 $r10 2
-       bra ne mmctx_stop
+       bra ne #mmctx_stop
                // wait for queue to empty
                mmctx_fini_wait:
                        iord $r11 I[$r8 + 0x000]        // MMCTX_CTRL
                        and $r11 0x1f
                        cmpu b32 $r11 0x10
-                       bra ne mmctx_fini_wait
+                       bra ne #mmctx_fini_wait
                mov $r10 2                              // DONE_MMCTX
-               call wait_donez
-               bra mmctx_done
+               call #wait_donez
+               bra #mmctx_done
        mmctx_stop:
                xbit $r11 $r10 0
                shl b32 $r11 16                 // DIR
@@ -295,7 +295,7 @@ mmctx_xfer:
                        // wait for STOP_TRIGGER to clear
                        iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
                        xbit $r11 $r11 18
-                       bra ne mmctx_stop_wait
+                       bra ne #mmctx_stop_wait
        mmctx_done:
        trace_clr(T_MMCTX)
        ret
@@ -305,7 +305,7 @@ mmctx_xfer:
 strand_wait:
        push $r10
        mov $r10 2
-       call wait_donez
+       call #wait_donez
        pop $r10
        ret
 
@@ -316,7 +316,7 @@ strand_pre:
        sethi $r8 0x20000
        mov $r9 0xc
        iowr I[$r8] $r9
-       call strand_wait
+       call #strand_wait
        ret
 
 // unknown - call after issuing strand commands
@@ -326,7 +326,7 @@ strand_post:
        sethi $r8 0x20000
        mov $r9 0xd
        iowr I[$r8] $r9
-       call strand_wait
+       call #strand_wait
        ret
 
 // Selects strand set?!
@@ -341,11 +341,11 @@ strand_set:
        iowr I[$r10 + 0x000] $r12               // 0x93c = 0xf
        mov $r12 0xb
        iowr I[$r11 + 0x000] $r12               // 0x928 = 0xb
-       call strand_wait
+       call #strand_wait
        iowr I[$r10 + 0x000] $r14               // 0x93c = <id>
        mov $r12 0xa
        iowr I[$r11 + 0x000] $r12               // 0x928 = 0xa
-       call strand_wait
+       call #strand_wait
        ret
 
 // Initialise strand context data
@@ -357,22 +357,22 @@ strand_set:
 //
 strand_ctx_init:
        trace_set(T_STRINIT)
-       call strand_pre
+       call #strand_pre
        mov $r14 3
-       call strand_set
+       call #strand_set
        mov $r10 0x46fc
        sethi $r10 0x20000
        add b32 $r11 $r10 0x400
        iowr I[$r10 + 0x100] $r0        // STRAND_FIRST_GENE = 0
        mov $r12 1
        iowr I[$r11 + 0x000] $r12       // STRAND_CMD = LATCH_FIRST_GENE
-       call strand_wait
+       call #strand_wait
        sub b32 $r12 $r0 1
        iowr I[$r10 + 0x000] $r12       // STRAND_GENE_CNT = 0xffffffff
        mov $r12 2
        iowr I[$r11 + 0x000] $r12       // STRAND_CMD = LATCH_GENE_CNT
-       call strand_wait
-       call strand_post
+       call #strand_wait
+       call #strand_post
 
        // read the size of each strand, poke the context offset of
        // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
@@ -391,7 +391,7 @@ strand_ctx_init:
                add b32 $r14 $r10
                add b32 $r8 4
                sub b32 $r9 1
-               bra ne ctx_init_strand_loop
+               bra ne #ctx_init_strand_loop
 
        shl b32 $r14 8
        sub b32 $r15 $r14 $r15
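
The queue_put/queue_get hunks above drive an 8-entry command ring whose GET and PUT counters keep counting past the ring size: the ring is empty when the two counters are equal, full when PUT equals GET with bit 3 flipped, and the slot index is the counter masked with 7. A standalone C sketch of the scheme follows; the names and the exact wrap handling are illustrative only, while the firmware keeps the counters at D[$r13+0x0]/D[$r13+0x4] and raises E_CMD_OVERFLOW when the ring is full.

#include <stdint.h>

struct cmd_queue {
	uint32_t get;                         /* free-running, wraps at 16 in this sketch */
	uint32_t put;
	struct { uint32_t cmd, data; } ent[8];
};

static int queue_put(struct cmd_queue *q, uint32_t cmd, uint32_t data)
{
	if ((q->get ^ 8) == q->put)           /* PUT one full lap ahead of GET: full */
		return -1;                    /* firmware reports E_CMD_OVERFLOW here */
	q->ent[q->put & 7].cmd  = cmd;        /* slot index = counter & 7 */
	q->ent[q->put & 7].data = data;
	q->put = (q->put + 1) & 0xf;
	return 0;
}

static int queue_get(struct cmd_queue *q, uint32_t *cmd, uint32_t *data)
{
	if (q->get == q->put)                 /* equal counters: empty */
		return -1;
	*cmd  = q->ent[q->get & 7].cmd;
	*data = q->ent[q->get & 7].data;
	q->get = (q->get + 1) & 0xf;
	return 0;
}
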
index 636fe98..91d44ea 100644 (file)
@@ -87,6 +87,7 @@ nvc0_graph_class(struct drm_device *dev)
        case 0xc1:
                return 0x9197;
        case 0xc8:
+       case 0xd9:
                return 0x9297;
        default:
                return 0;
index 96b0b93..de77842 100644 (file)
@@ -1268,6 +1268,17 @@ nvc0_grctx_generate_9039(struct drm_device *dev)
 static void
 nvc0_grctx_generate_90c0(struct drm_device *dev)
 {
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       int i;
+
+       for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+               nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
+               nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
+               nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
+               nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
+               nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
+               nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
+       }
        nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
        nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
        nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
@@ -1276,6 +1287,12 @@ nvc0_grctx_generate_90c0(struct drm_device *dev)
        nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
        nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
        nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
+       for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+               nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
+               nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
+               nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
+               nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
+       }
        nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
        nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
        nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
@@ -1471,14 +1488,20 @@ nvc0_grctx_generate_shaders(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-       if (dev_priv->chipset != 0xc1) {
-               nv_wr32(dev, 0x405800, 0x078000bf);
-               nv_wr32(dev, 0x405830, 0x02180000);
-       } else {
+       if (dev_priv->chipset == 0xd9) {
                nv_wr32(dev, 0x405800, 0x0f8000bf);
                nv_wr32(dev, 0x405830, 0x02180218);
+               nv_wr32(dev, 0x405834, 0x08000000);
+       } else
+       if (dev_priv->chipset == 0xc1) {
+               nv_wr32(dev, 0x405800, 0x0f8000bf);
+               nv_wr32(dev, 0x405830, 0x02180218);
+               nv_wr32(dev, 0x405834, 0x00000000);
+       } else {
+               nv_wr32(dev, 0x405800, 0x078000bf);
+               nv_wr32(dev, 0x405830, 0x02180000);
+               nv_wr32(dev, 0x405834, 0x00000000);
        }
-       nv_wr32(dev, 0x405834, 0x00000000);
        nv_wr32(dev, 0x405838, 0x00000000);
        nv_wr32(dev, 0x405854, 0x00000000);
        nv_wr32(dev, 0x405870, 0x00000001);
@@ -1509,7 +1532,10 @@ nvc0_grctx_generate_unk64xx(struct drm_device *dev)
        nv_wr32(dev, 0x4064ac, 0x00003fff);
        nv_wr32(dev, 0x4064b4, 0x00000000);
        nv_wr32(dev, 0x4064b8, 0x00000000);
-       if (dev_priv->chipset == 0xc1) {
+       if (dev_priv->chipset == 0xd9)
+               nv_wr32(dev, 0x4064bc, 0x00000000);
+       if (dev_priv->chipset == 0xc1 ||
+           dev_priv->chipset == 0xd9) {
                nv_wr32(dev, 0x4064c0, 0x80140078);
                nv_wr32(dev, 0x4064c4, 0x0086ffff);
        }
@@ -1550,10 +1576,23 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
        /* ROPC_BROADCAST */
        nv_wr32(dev, 0x408800, 0x02802a3c);
        nv_wr32(dev, 0x408804, 0x00000040);
-       nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005);
-       nv_wr32(dev, 0x408900, 0x3080b801);
-       nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001);
-       nv_wr32(dev, 0x408908, 0x00c80929);
+       if (chipset == 0xd9) {
+               nv_wr32(dev, 0x408808, 0x1043e005);
+               nv_wr32(dev, 0x408900, 0x3080b801);
+               nv_wr32(dev, 0x408904, 0x1043e005);
+               nv_wr32(dev, 0x408908, 0x00c8102f);
+       } else
+       if (chipset == 0xc1) {
+               nv_wr32(dev, 0x408808, 0x1003e005);
+               nv_wr32(dev, 0x408900, 0x3080b801);
+               nv_wr32(dev, 0x408904, 0x62000001);
+               nv_wr32(dev, 0x408908, 0x00c80929);
+       } else {
+               nv_wr32(dev, 0x408808, 0x0003e00d);
+               nv_wr32(dev, 0x408900, 0x3080b801);
+               nv_wr32(dev, 0x408904, 0x02000001);
+               nv_wr32(dev, 0x408908, 0x00c80929);
+       }
        nv_wr32(dev, 0x40890c, 0x00000000);
        nv_wr32(dev, 0x408980, 0x0000011d);
 }
@@ -1572,7 +1611,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
        nv_wr32(dev, 0x418408, 0x00000000);
        nv_wr32(dev, 0x41840c, 0x00001008);
        nv_wr32(dev, 0x418410, 0x0fff0fff);
-       nv_wr32(dev, 0x418414, 0x00200fff);
+       nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
        nv_wr32(dev, 0x418450, 0x00000000);
        nv_wr32(dev, 0x418454, 0x00000000);
        nv_wr32(dev, 0x418458, 0x00000000);
@@ -1587,14 +1626,17 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
        nv_wr32(dev, 0x418700, 0x00000002);
        nv_wr32(dev, 0x418704, 0x00000080);
        nv_wr32(dev, 0x418708, 0x00000000);
-       nv_wr32(dev, 0x41870c, 0x07c80000);
+       nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
        nv_wr32(dev, 0x418710, 0x00000000);
-       nv_wr32(dev, 0x418800, 0x0006860a);
+       nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
        nv_wr32(dev, 0x418808, 0x00000000);
        nv_wr32(dev, 0x41880c, 0x00000000);
        nv_wr32(dev, 0x418810, 0x00000000);
        nv_wr32(dev, 0x418828, 0x00008442);
-       nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001);
+       if (chipset == 0xc1 || chipset == 0xd9)
+               nv_wr32(dev, 0x418830, 0x10000001);
+       else
+               nv_wr32(dev, 0x418830, 0x00000001);
        nv_wr32(dev, 0x4188d8, 0x00000008);
        nv_wr32(dev, 0x4188e0, 0x01000000);
        nv_wr32(dev, 0x4188e8, 0x00000000);
@@ -1602,7 +1644,12 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
        nv_wr32(dev, 0x4188f0, 0x00000000);
        nv_wr32(dev, 0x4188f4, 0x00000000);
        nv_wr32(dev, 0x4188f8, 0x00000000);
-       nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018);
+       if (chipset == 0xd9)
+               nv_wr32(dev, 0x4188fc, 0x20100008);
+       else if (chipset == 0xc1)
+               nv_wr32(dev, 0x4188fc, 0x00100018);
+       else
+               nv_wr32(dev, 0x4188fc, 0x00100000);
        nv_wr32(dev, 0x41891c, 0x00ff00ff);
        nv_wr32(dev, 0x418924, 0x00000000);
        nv_wr32(dev, 0x418928, 0x00ffff00);
@@ -1616,7 +1663,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
                nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
                nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
        }
-       nv_wr32(dev, 0x418b00, 0x00000000);
+       nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
        nv_wr32(dev, 0x418b08, 0x0a418820);
        nv_wr32(dev, 0x418b0c, 0x062080e6);
        nv_wr32(dev, 0x418b10, 0x020398a4);
@@ -1633,7 +1680,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
        nv_wr32(dev, 0x418c24, 0x00000000);
        nv_wr32(dev, 0x418c28, 0x00000000);
        nv_wr32(dev, 0x418c2c, 0x00000000);
-       if (chipset == 0xc1)
+       if (chipset == 0xc1 || chipset == 0xd9)
                nv_wr32(dev, 0x418c6c, 0x00000001);
        nv_wr32(dev, 0x418c80, 0x20200004);
        nv_wr32(dev, 0x418c8c, 0x00000001);
@@ -1653,7 +1700,10 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
        nv_wr32(dev, 0x419818, 0x00000000);
        nv_wr32(dev, 0x41983c, 0x00038bc7);
        nv_wr32(dev, 0x419848, 0x00000000);
-       nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129);
+       if (chipset == 0xc1 || chipset == 0xd9)
+               nv_wr32(dev, 0x419864, 0x00000129);
+       else
+               nv_wr32(dev, 0x419864, 0x0000012a);
        nv_wr32(dev, 0x419888, 0x00000000);
        nv_wr32(dev, 0x419a00, 0x000001f0);
        nv_wr32(dev, 0x419a04, 0x00000001);
@@ -1663,7 +1713,9 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
        nv_wr32(dev, 0x419a14, 0x00000200);
        nv_wr32(dev, 0x419a1c, 0x00000000);
        nv_wr32(dev, 0x419a20, 0x00000800);
-       if (chipset != 0xc0 && chipset != 0xc8)
+       if (chipset == 0xd9)
+               nv_wr32(dev, 0x00419ac4, 0x0017f440);
+       else if (chipset != 0xc0 && chipset != 0xc8)
                nv_wr32(dev, 0x00419ac4, 0x0007f440);
        nv_wr32(dev, 0x419b00, 0x0a418820);
        nv_wr32(dev, 0x419b04, 0x062080e6);
@@ -1672,21 +1724,33 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
        nv_wr32(dev, 0x419b10, 0x0a418820);
        nv_wr32(dev, 0x419b14, 0x000000e6);
        nv_wr32(dev, 0x419bd0, 0x00900103);
-       nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001);
+       if (chipset == 0xc1 || chipset == 0xd9)
+               nv_wr32(dev, 0x419be0, 0x00400001);
+       else
+               nv_wr32(dev, 0x419be0, 0x00000001);
        nv_wr32(dev, 0x419be4, 0x00000000);
-       nv_wr32(dev, 0x419c00, 0x00000002);
+       nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
        nv_wr32(dev, 0x419c04, 0x00000006);
        nv_wr32(dev, 0x419c08, 0x00000002);
        nv_wr32(dev, 0x419c20, 0x00000000);
-       if (chipset == 0xce || chipset == 0xcf)
+       if (dev_priv->chipset == 0xd9) {
+               nv_wr32(dev, 0x419c24, 0x00084210);
+               nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
                nv_wr32(dev, 0x419cb0, 0x00020048);
-       else
+       } else
+       if (chipset == 0xce || chipset == 0xcf) {
+               nv_wr32(dev, 0x419cb0, 0x00020048);
+       } else {
                nv_wr32(dev, 0x419cb0, 0x00060048);
+       }
        nv_wr32(dev, 0x419ce8, 0x00000000);
        nv_wr32(dev, 0x419cf4, 0x00000183);
-       nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
+       if (chipset == 0xc1 || chipset == 0xd9)
+               nv_wr32(dev, 0x419d20, 0x12180000);
+       else
+               nv_wr32(dev, 0x419d20, 0x02180000);
        nv_wr32(dev, 0x419d24, 0x00001fff);
-       if (chipset == 0xc1)
+       if (chipset == 0xc1 || chipset == 0xd9)
                nv_wr32(dev, 0x419d44, 0x02180218);
        nv_wr32(dev, 0x419e04, 0x00000000);
        nv_wr32(dev, 0x419e08, 0x00000000);
@@ -1986,6 +2050,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
        nv_icmd(dev, 0x00000215, 0x00000040);
        nv_icmd(dev, 0x00000216, 0x00000040);
        nv_icmd(dev, 0x00000217, 0x00000040);
+       if (dev_priv->chipset == 0xd9) {
+               for (i = 0x0400; i <= 0x0417; i++)
+                       nv_icmd(dev, i, 0x00000040);
+       }
        nv_icmd(dev, 0x00000218, 0x0000c080);
        nv_icmd(dev, 0x00000219, 0x0000c080);
        nv_icmd(dev, 0x0000021a, 0x0000c080);
@@ -1994,6 +2062,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
        nv_icmd(dev, 0x0000021d, 0x0000c080);
        nv_icmd(dev, 0x0000021e, 0x0000c080);
        nv_icmd(dev, 0x0000021f, 0x0000c080);
+       if (dev_priv->chipset == 0xd9) {
+               for (i = 0x0440; i <= 0x0457; i++)
+                       nv_icmd(dev, i, 0x0000c080);
+       }
        nv_icmd(dev, 0x000000ad, 0x0000013e);
        nv_icmd(dev, 0x000000e1, 0x00000010);
        nv_icmd(dev, 0x00000290, 0x00000000);
@@ -2556,7 +2628,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
        nv_icmd(dev, 0x0000053f, 0xffff0000);
        nv_icmd(dev, 0x00000585, 0x0000003f);
        nv_icmd(dev, 0x00000576, 0x00000003);
-       if (dev_priv->chipset == 0xc1)
+       if (dev_priv->chipset == 0xc1 ||
+           dev_priv->chipset == 0xd9)
                nv_icmd(dev, 0x0000057b, 0x00000059);
        nv_icmd(dev, 0x00000586, 0x00000040);
        nv_icmd(dev, 0x00000582, 0x00000080);
@@ -2658,6 +2731,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
        nv_icmd(dev, 0x00000957, 0x00000003);
        nv_icmd(dev, 0x0000095e, 0x20164010);
        nv_icmd(dev, 0x0000095f, 0x00000020);
+       if (dev_priv->chipset == 0xd9)
+               nv_icmd(dev, 0x0000097d, 0x00000020);
        nv_icmd(dev, 0x00000683, 0x00000006);
        nv_icmd(dev, 0x00000685, 0x003fffff);
        nv_icmd(dev, 0x00000687, 0x00000c48);
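
The nvc0_grctx.c hunks above all follow one pattern: the value written by nv_wr32()/nv_icmd() is chosen from up to three variants depending on whether the chipset is 0xd9 (GF119), 0xc1 (GF108), or anything else, and the former single-line ternaries are expanded into if/else chains once the third variant appears. A minimal sketch of that selection as a hypothetical helper, purely to illustrate the pattern the patch open-codes:

#include <stdint.h>

/* Hypothetical helper, not part of the patch: pick a register value by chipset. */
static uint32_t
grctx_pick(int chipset, uint32_t d9_val, uint32_t c1_val, uint32_t def_val)
{
	if (chipset == 0xd9)            /* GF119 */
		return d9_val;
	if (chipset == 0xc1)            /* GF108 */
		return c1_val;
	return def_val;                 /* GF100/GF104/GF106/... */
}

/* e.g. the 0x4188fc write above would then read:
 *   nv_wr32(dev, 0x4188fc, grctx_pick(chipset, 0x20100008, 0x00100018, 0x00100000));
 */
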
index 06f5e26..15272be 100644 (file)
@@ -32,7 +32,7 @@
  * - watchdog timer around ctx operations
  */
 
-.section nvc0_grgpc_data
+.section #nvc0_grgpc_data
 include(`nvc0_graph.fuc')
 gpc_id:                        .b32 0
 gpc_mmio_list_head:    .b32 0
@@ -48,40 +48,45 @@ cmd_queue:          queue_init
 // chipset descriptions
 chipsets:
 .b8  0xc0 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc0_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
 .b8  0xc1 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc1_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc1_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc1_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc1_tpc_mmio_tail
 .b8  0xc3 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
 .b8  0xc4 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
 .b8  0xc8 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc0_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
 .b8  0xce 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
 .b8  0xcf 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvcf_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvcf_tpc_mmio_tail
+.b8  0xd9 0 0 0
+.b16 #nvd9_gpc_mmio_head
+.b16 #nvd9_gpc_mmio_tail
+.b16 #nvd9_tpc_mmio_head
+.b16 #nvd9_tpc_mmio_tail
 .b8  0 0 0 0
 
 // GPC mmio lists
@@ -114,6 +119,35 @@ nvc0_gpc_mmio_tail:
 mmctx_data(0x000c6c, 1);
 nvc1_gpc_mmio_tail:
 
+nvd9_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 2)
+mmctx_data(0x00040c, 3)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c6c, 1)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+nvd9_gpc_mmio_tail:
+
 // TPC mmio lists
 nvc0_tpc_mmio_head:
 mmctx_data(0x000018, 1)
@@ -146,9 +180,34 @@ nvc3_tpc_mmio_tail:
 mmctx_data(0x000544, 1)
 nvc1_tpc_mmio_tail:
 
+nvd9_tpc_mmio_head:
+mmctx_data(0x000018, 1)
+mmctx_data(0x00003c, 1)
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x000300, 6)
+mmctx_data(0x0003d0, 1)
+mmctx_data(0x0003e0, 2)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 3)
+mmctx_data(0x0004b0, 1)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000520, 2)
+mmctx_data(0x000544, 1)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 20)
+mmctx_data(0x000698, 1)
+mmctx_data(0x0006e0, 1)
+mmctx_data(0x000750, 3)
+nvd9_tpc_mmio_tail:
 
-.section nvc0_grgpc_code
-bra init
+.section #nvc0_grgpc_code
+bra #init
 define(`include_code')
 include(`nvc0_graph.fuc')
 
@@ -160,10 +219,10 @@ error:
        push $r14
        mov $r14 -0x67ec        // 0x9814
        sethi $r14 0x400000
-       call nv_wr32            // HUB_CTXCTL_CC_SCRATCH[5] = error code
+       call #nv_wr32           // HUB_CTXCTL_CC_SCRATCH[5] = error code
        add b32 $r14 0x41c
        mov $r15 1
-       call nv_wr32            // HUB_CTXCTL_INTR_UP_SET
+       call #nv_wr32           // HUB_CTXCTL_INTR_UP_SET
        pop $r14
        ret
 
@@ -190,7 +249,7 @@ init:
        iowr I[$r1 + 0x000] $r2         // FIFO_ENABLE
 
        // setup i0 handler, and route all interrupts to it
-       mov $r1 ih
+       mov $r1 #ih
        mov $iv0 $r1
        mov $r1 0x400
        iowr I[$r1 + 0x300] $r0         // INTR_DISPATCH
@@ -210,24 +269,24 @@ init:
        and $r2 0x1f
        shl b32 $r3 $r2
        sub b32 $r3 1
-       st b32 D[$r0 + tpc_count] $r2
-       st b32 D[$r0 + tpc_mask] $r3
+       st b32 D[$r0 + #tpc_count] $r2
+       st b32 D[$r0 + #tpc_mask] $r3
        add b32 $r1 0x400
        iord $r2 I[$r1 + 0x000]         // MYINDEX
-       st b32 D[$r0 + gpc_id] $r2
+       st b32 D[$r0 + #gpc_id] $r2
 
        // find context data for this chipset
        mov $r2 0x800
        shl b32 $r2 6
        iord $r2 I[$r2 + 0x000]         // CC_SCRATCH[0]
-       mov $r1 chipsets - 12
+       mov $r1 #chipsets - 12
        init_find_chipset:
                add b32 $r1 12
                ld b32 $r3 D[$r1 + 0x00]
                cmpu b32 $r3 $r2
-               bra e init_context
+               bra e #init_context
                cmpu b32 $r3 0
-               bra ne init_find_chipset
+               bra ne #init_find_chipset
                // unknown chipset
                ret
 
@@ -253,19 +312,19 @@ init:
        clear b32 $r15
        ld b16 $r14 D[$r1 + 4]
        ld b16 $r15 D[$r1 + 6]
-       st b16 D[$r0 + gpc_mmio_list_head] $r14
-       st b16 D[$r0 + gpc_mmio_list_tail] $r15
-       call mmctx_size
+       st b16 D[$r0 + #gpc_mmio_list_head] $r14
+       st b16 D[$r0 + #gpc_mmio_list_tail] $r15
+       call #mmctx_size
        add b32 $r2 $r15
        add b32 $r3 $r15
 
        // calculate per-TPC mmio context size, store the list pointers
        ld b16 $r14 D[$r1 + 8]
        ld b16 $r15 D[$r1 + 10]
-       st b16 D[$r0 + tpc_mmio_list_head] $r14
-       st b16 D[$r0 + tpc_mmio_list_tail] $r15
-       call mmctx_size
-       ld b32 $r14 D[$r0 + tpc_count]
+       st b16 D[$r0 + #tpc_mmio_list_head] $r14
+       st b16 D[$r0 + #tpc_mmio_list_tail] $r15
+       call #mmctx_size
+       ld b32 $r14 D[$r0 + #tpc_count]
        mulu $r14 $r15
        add b32 $r2 $r14
        add b32 $r3 $r14
@@ -283,7 +342,7 @@ init:
 
        // calculate size of strand context data
        mov b32 $r15 $r2
-       call strand_ctx_init
+       call #strand_ctx_init
        add b32 $r3 $r15
 
        // save context size, and tell HUB we're done
@@ -301,13 +360,13 @@ init:
 main:
        bset $flags $p0
        sleep $p0
-       mov $r13 cmd_queue
-       call queue_get
-       bra $p1 main
+       mov $r13 #cmd_queue
+       call #queue_get
+       bra $p1 #main
 
        // 0x0000-0x0003 are all context transfers
        cmpu b32 $r14 0x04
-       bra nc main_not_ctx_xfer
+       bra nc #main_not_ctx_xfer
                // fetch $flags and mask off $p1/$p2
                mov $r1 $flags
                mov $r2 0x0006
@@ -318,14 +377,14 @@ main:
                or $r1 $r14
                mov $flags $r1
                // transfer context data
-               call ctx_xfer
-               bra main
+               call #ctx_xfer
+               bra #main
 
        main_not_ctx_xfer:
        shl b32 $r15 $r14 16
        or $r15 E_BAD_COMMAND
-       call error
-       bra main
+       call #error
+       bra #main
 
 // interrupt handler
 ih:
@@ -342,13 +401,13 @@ ih:
        // incoming fifo command?
        iord $r10 I[$r0 + 0x200]        // INTR
        and $r11 $r10 0x00000004
-       bra e ih_no_fifo
+       bra e #ih_no_fifo
                // queue incoming fifo command for later processing
                mov $r11 0x1900
-               mov $r13 cmd_queue
+               mov $r13 #cmd_queue
                iord $r14 I[$r11 + 0x100]       // FIFO_CMD
                iord $r15 I[$r11 + 0x000]       // FIFO_DATA
-               call queue_put
+               call #queue_put
                add b32 $r11 0x400
                mov $r14 1
                iowr I[$r11 + 0x000] $r14       // FIFO_ACK
@@ -374,11 +433,11 @@ ih:
 //
 hub_barrier_done:
        mov $r15 1
-       ld b32 $r14 D[$r0 + gpc_id]
+       ld b32 $r14 D[$r0 + #gpc_id]
        shl b32 $r15 $r14
        mov $r14 -0x6be8        // 0x409418 - HUB_BAR_SET
        sethi $r14 0x400000
-       call nv_wr32
+       call #nv_wr32
        ret
 
 // Disables various things, waits a bit, and re-enables them..
@@ -395,7 +454,7 @@ ctx_redswitch:
        mov $r15 8
        ctx_redswitch_delay:
                sub b32 $r15 1
-               bra ne ctx_redswitch_delay
+               bra ne #ctx_redswitch_delay
        mov $r15 0xa20
        iowr I[$r14] $r15       // GPC_RED_SWITCH = UNK11, ENABLE, POWER
        ret
@@ -413,8 +472,8 @@ ctx_xfer:
        mov $r1 0xa04
        shl b32 $r1 6
        iowr I[$r1 + 0x000] $r15// MEM_BASE
-       bra not $p1 ctx_xfer_not_load
-               call ctx_redswitch
+       bra not $p1 #ctx_xfer_not_load
+               call #ctx_redswitch
        ctx_xfer_not_load:
 
        // strands
@@ -422,7 +481,7 @@ ctx_xfer:
        sethi $r1 0x20000
        mov $r2 0xc
        iowr I[$r1] $r2         // STRAND_CMD(0x3f) = 0x0c
-       call strand_wait
+       call #strand_wait
        mov $r2 0x47fc
        sethi $r2 0x20000
        iowr I[$r2] $r0         // STRAND_FIRST_GENE(0x3f) = 0x00
@@ -435,46 +494,46 @@ ctx_xfer:
        or $r10 2               // first
        mov $r11 0x0000
        sethi $r11 0x500000
-       ld b32 $r12 D[$r0 + gpc_id]
+       ld b32 $r12 D[$r0 + #gpc_id]
        shl b32 $r12 15
        add b32 $r11 $r12       // base = NV_PGRAPH_GPCn
-       ld b32 $r12 D[$r0 + gpc_mmio_list_head]
-       ld b32 $r13 D[$r0 + gpc_mmio_list_tail]
+       ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
+       ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
        mov $r14 0              // not multi
-       call mmctx_xfer
+       call #mmctx_xfer
 
        // per-TPC mmio context
        xbit $r10 $flags $p1    // direction
        or $r10 4               // last
        mov $r11 0x4000
        sethi $r11 0x500000     // base = NV_PGRAPH_GPC0_TPC0
-       ld b32 $r12 D[$r0 + gpc_id]
+       ld b32 $r12 D[$r0 + #gpc_id]
        shl b32 $r12 15
        add b32 $r11 $r12       // base = NV_PGRAPH_GPCn_TPC0
-       ld b32 $r12 D[$r0 + tpc_mmio_list_head]
-       ld b32 $r13 D[$r0 + tpc_mmio_list_tail]
-       ld b32 $r15 D[$r0 + tpc_mask]
+       ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
+       ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
+       ld b32 $r15 D[$r0 + #tpc_mask]
        mov $r14 0x800          // stride = 0x800
-       call mmctx_xfer
+       call #mmctx_xfer
 
        // wait for strands to finish
-       call strand_wait
+       call #strand_wait
 
        // if load, or a save without a load following, do some
        // unknown stuff that's done after finishing a block of
        // strand commands
-       bra $p1 ctx_xfer_post
-       bra not $p2 ctx_xfer_done
+       bra $p1 #ctx_xfer_post
+       bra not $p2 #ctx_xfer_done
        ctx_xfer_post:
                mov $r1 0x4afc
                sethi $r1 0x20000
                mov $r2 0xd
                iowr I[$r1] $r2         // STRAND_CMD(0x3f) = 0x0d
-               call strand_wait
+               call #strand_wait
 
        // mark completion in HUB's barrier
        ctx_xfer_done:
-       call hub_barrier_done
+       call #hub_barrier_done
        ret
 
 .align 256
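
Each entry in the GPC firmware's chipsets table above is a 12-byte record: a 32-bit chipset id (stored as .b8 <id> 0 0 0) followed by four 16-bit offsets naming the per-chipset GPC and TPC mmio lists, with an all-zero record terminating the table; init_find_chipset walks it in 12-byte steps until the id read from CC_SCRATCH[0] matches. Roughly, in C (struct and function names are illustrative, not from the source):

#include <stdint.h>
#include <stddef.h>

struct gpc_chipset_desc {
	uint32_t chipset;            /* 0xc0, 0xc1, ... 0xd9; 0 ends the table */
	uint16_t gpc_mmio_head;      /* offsets of the mmctx_data() lists */
	uint16_t gpc_mmio_tail;
	uint16_t tpc_mmio_head;
	uint16_t tpc_mmio_tail;
};                                   /* 12 bytes, matching the .b8/.b16 layout */

static const struct gpc_chipset_desc *
find_chipset(const struct gpc_chipset_desc *tbl, uint32_t chipset)
{
	for (; tbl->chipset; tbl++)      /* the "add 12, load, compare" loop */
		if (tbl->chipset == chipset)
			return tbl;
	return NULL;                     /* unknown chipset: the ucode just returns */
}

The hub firmware's chipsets table further down uses the same idea with a single head/tail pair per entry, hence its 8-byte stride (its init code starts from #chipsets - 8 and advances by 8).
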
index 6f82032..a988b8a 100644 (file)
@@ -25,26 +25,29 @@ uint32_t nvc0_grgpc_data[] = {
        0x00000000,
        0x00000000,
        0x000000c0,
-       0x011c00bc,
-       0x01700120,
+       0x012800c8,
+       0x01e40194,
        0x000000c1,
-       0x012000bc,
-       0x01840120,
+       0x012c00c8,
+       0x01f80194,
        0x000000c3,
-       0x011c00bc,
-       0x01800120,
+       0x012800c8,
+       0x01f40194,
        0x000000c4,
-       0x011c00bc,
-       0x01800120,
+       0x012800c8,
+       0x01f40194,
        0x000000c8,
-       0x011c00bc,
-       0x01700120,
+       0x012800c8,
+       0x01e40194,
        0x000000ce,
-       0x011c00bc,
-       0x01800120,
+       0x012800c8,
+       0x01f40194,
        0x000000cf,
-       0x011c00bc,
-       0x017c0120,
+       0x012800c8,
+       0x01f00194,
+       0x000000d9,
+       0x0194012c,
+       0x025401f8,
        0x00000000,
        0x00000380,
        0x14000400,
@@ -71,6 +74,32 @@ uint32_t nvc0_grgpc_data[] = {
        0x08001000,
        0x00001014,
        0x00000c6c,
+       0x00000380,
+       0x04000400,
+       0x0800040c,
+       0x20000450,
+       0x00000600,
+       0x00000684,
+       0x10000700,
+       0x00000800,
+       0x08000808,
+       0x00000828,
+       0x00000830,
+       0x000008d8,
+       0x000008e0,
+       0x140008e8,
+       0x0000091c,
+       0x08000924,
+       0x00000b00,
+       0x14000b08,
+       0x00000bb8,
+       0x00000c08,
+       0x1c000c10,
+       0x00000c6c,
+       0x00000c80,
+       0x00000c8c,
+       0x08001000,
+       0x00001014,
        0x00000018,
        0x0000003c,
        0x00000048,
@@ -96,6 +125,29 @@ uint32_t nvc0_grgpc_data[] = {
        0x000006e0,
        0x000004bc,
        0x00000544,
+       0x00000018,
+       0x0000003c,
+       0x00000048,
+       0x00000064,
+       0x00000088,
+       0x14000200,
+       0x0400021c,
+       0x000002c4,
+       0x14000300,
+       0x000003d0,
+       0x040003e0,
+       0x08000400,
+       0x08000420,
+       0x000004b0,
+       0x000004e8,
+       0x000004f4,
+       0x04000520,
+       0x00000544,
+       0x0c000604,
+       0x4c000644,
+       0x00000698,
+       0x000006e0,
+       0x08000750,
 };
 
 uint32_t nvc0_grgpc_code[] = {
index e4f8c7e..98acddb 100644 (file)
@@ -27,7 +27,7 @@
  *    m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
  */
 
-.section nvc0_grhub_data
+.section #nvc0_grhub_data
 include(`nvc0_graph.fuc')
 gpc_count:             .b32 0
 rop_count:             .b32 0
@@ -39,26 +39,29 @@ ctx_current:                .b32 0
 
 chipsets:
 .b8  0xc0 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
 .b8  0xc1 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc1_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc1_hub_mmio_tail
 .b8  0xc3 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
 .b8  0xc4 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
 .b8  0xc8 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
 .b8  0xce 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
 .b8  0xcf 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8  0xd9 0 0 0
+.b16 #nvd9_hub_mmio_head
+.b16 #nvd9_hub_mmio_tail
 .b8  0 0 0 0
 
 nvc0_hub_mmio_head:
@@ -105,6 +108,48 @@ nvc0_hub_mmio_tail:
 mmctx_data(0x4064c0, 2)
 nvc1_hub_mmio_tail:
 
+nvd9_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404004, 10)
+mmctx_data(0x404044, 1)
+mmctx_data(0x404094, 14)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 2)
+mmctx_data(0x404178, 2)
+mmctx_data(0x404200, 8)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 32)
+mmctx_data(0x404698, 21)
+mmctx_data(0x4046f0, 2)
+mmctx_data(0x404700, 22)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 5)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408900, 4)
+mmctx_data(0x408980, 1)
+nvd9_hub_mmio_tail:
+
 .align 256
 chan_data:
 chan_mmio_count:       .b32 0
@@ -113,8 +158,8 @@ chan_mmio_address:  .b32 0
 .align 256
 xfer_data:             .b32 0
 
-.section nvc0_grhub_code
-bra init
+.section #nvc0_grhub_code
+bra #init
 define(`include_code')
 include(`nvc0_graph.fuc')
 
@@ -157,7 +202,7 @@ init:
        iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
 
        // setup i0 handler, and route all interrupts to it
-       mov $r1 ih
+       mov $r1 #ih
        mov $iv0 $r1
        mov $r1 0x400
        iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
@@ -201,11 +246,11 @@ init:
        // fetch enabled GPC/ROP counts
        mov $r14 -0x69fc        // 0x409604
        sethi $r14 0x400000
-       call nv_rd32
+       call #nv_rd32
        extr $r1 $r15 16:20
-       st b32 D[$r0 + rop_count] $r1
+       st b32 D[$r0 + #rop_count] $r1
        and $r15 0x1f
-       st b32 D[$r0 + gpc_count] $r15
+       st b32 D[$r0 + #gpc_count] $r15
 
        // set BAR_REQMASK to GPC mask
        mov $r1 1
@@ -220,14 +265,14 @@ init:
        mov $r2 0x800
        shl b32 $r2 6
        iord $r2 I[$r2 + 0x000]         // CC_SCRATCH[0]
-       mov $r15 chipsets - 8
+       mov $r15 #chipsets - 8
        init_find_chipset:
                add b32 $r15 8
                ld b32 $r3 D[$r15 + 0x00]
                cmpu b32 $r3 $r2
-               bra e init_context
+               bra e #init_context
                cmpu b32 $r3 0
-               bra ne init_find_chipset
+               bra ne #init_find_chipset
                // unknown chipset
                ret
 
@@ -239,9 +284,9 @@ init:
        ld b16 $r14 D[$r15 + 4]
        ld b16 $r15 D[$r15 + 6]
        sethi $r14 0
-       st b32 D[$r0 + hub_mmio_list_head] $r14
-       st b32 D[$r0 + hub_mmio_list_tail] $r15
-       call mmctx_size
+       st b32 D[$r0 + #hub_mmio_list_head] $r14
+       st b32 D[$r0 + #hub_mmio_list_tail] $r15
+       call #mmctx_size
 
        // set mmctx base addresses now so we don't have to do it later,
        // they don't (currently) ever change
@@ -260,7 +305,7 @@ init:
        add b32 $r1 1
        shl b32 $r1 8
        mov b32 $r15 $r1
-       call strand_ctx_init
+       call #strand_ctx_init
        add b32 $r1 $r15
 
        // initialise each GPC in sequence by passing in the offset of its
@@ -271,40 +316,40 @@ init:
        // when it has completed, and return the size of its context data
        // in GPCn_CC_SCRATCH[1]
        //
-       ld b32 $r3 D[$r0 + gpc_count]
+       ld b32 $r3 D[$r0 + #gpc_count]
        mov $r4 0x2000
        sethi $r4 0x500000
        init_gpc:
                // setup, and start GPC ucode running
                add b32 $r14 $r4 0x804
                mov b32 $r15 $r1
-               call nv_wr32                    // CC_SCRATCH[1] = ctx offset
+               call #nv_wr32                   // CC_SCRATCH[1] = ctx offset
                add b32 $r14 $r4 0x800
                mov b32 $r15 $r2
-               call nv_wr32                    // CC_SCRATCH[0] = chipset
+               call #nv_wr32                   // CC_SCRATCH[0] = chipset
                add b32 $r14 $r4 0x10c
                clear b32 $r15
-               call nv_wr32
+               call #nv_wr32
                add b32 $r14 $r4 0x104
-               call nv_wr32                    // ENTRY
+               call #nv_wr32                   // ENTRY
                add b32 $r14 $r4 0x100
                mov $r15 2                      // CTRL_START_TRIGGER
-               call nv_wr32                    // CTRL
+               call #nv_wr32                   // CTRL
 
                // wait for it to complete, and adjust context size
                add b32 $r14 $r4 0x800
                init_gpc_wait:
-                       call nv_rd32
+                       call #nv_rd32
                        xbit $r15 $r15 31
-                       bra e init_gpc_wait
+                       bra e #init_gpc_wait
                add b32 $r14 $r4 0x804
-               call nv_rd32
+               call #nv_rd32
                add b32 $r1 $r15
 
                // next!
                add b32 $r4 0x8000
                sub b32 $r3 1
-               bra ne init_gpc
+               bra ne #init_gpc
 
        // save context size, and tell host we're ready
        mov $r2 0x800
@@ -322,13 +367,13 @@ main:
        // sleep until we have something to do
        bset $flags $p0
        sleep $p0
-       mov $r13 cmd_queue
-       call queue_get
-       bra $p1 main
+       mov $r13 #cmd_queue
+       call #queue_get
+       bra $p1 #main
 
        // context switch, requested by GPU?
        cmpu b32 $r14 0x4001
-       bra ne main_not_ctx_switch
+       bra ne #main_not_ctx_switch
                trace_set(T_AUTO)
                mov $r1 0xb00
                shl b32 $r1 6
@@ -336,39 +381,39 @@ main:
                iord $r1 I[$r1 + 0x000]         // CHAN_CUR
 
                xbit $r3 $r1 31
-               bra e chsw_no_prev
+               bra e #chsw_no_prev
                        xbit $r3 $r2 31
-                       bra e chsw_prev_no_next
+                       bra e #chsw_prev_no_next
                                push $r2
                                mov b32 $r2 $r1
                                trace_set(T_SAVE)
                                bclr $flags $p1
                                bset $flags $p2
-                               call ctx_xfer
+                               call #ctx_xfer
                                trace_clr(T_SAVE);
                                pop $r2
                                trace_set(T_LOAD);
                                bset $flags $p1
-                               call ctx_xfer
+                               call #ctx_xfer
                                trace_clr(T_LOAD);
-                               bra chsw_done
+                               bra #chsw_done
                        chsw_prev_no_next:
                                push $r2
                                mov b32 $r2 $r1
                                bclr $flags $p1
                                bclr $flags $p2
-                               call ctx_xfer
+                               call #ctx_xfer
                                pop $r2
                                mov $r1 0xb00
                                shl b32 $r1 6
                                iowr I[$r1] $r2
-                               bra chsw_done
+                               bra #chsw_done
                chsw_no_prev:
                        xbit $r3 $r2 31
-                       bra e chsw_done
+                       bra e #chsw_done
                                bset $flags $p1
                                bclr $flags $p2
-                               call ctx_xfer
+                               call #ctx_xfer
 
                // ack the context switch request
                chsw_done:
@@ -377,32 +422,32 @@ main:
                mov $r2 1
                iowr I[$r1 + 0x000] $r2         // 0x409b0c
                trace_clr(T_AUTO)
-               bra main
+               bra #main
 
        // request to set current channel? (*not* a context switch)
        main_not_ctx_switch:
        cmpu b32 $r14 0x0001
-       bra ne main_not_ctx_chan
+       bra ne #main_not_ctx_chan
                mov b32 $r2 $r15
-               call ctx_chan
-               bra main_done
+               call #ctx_chan
+               bra #main_done
 
        // request to store current channel context?
        main_not_ctx_chan:
        cmpu b32 $r14 0x0002
-       bra ne main_not_ctx_save
+       bra ne #main_not_ctx_save
                trace_set(T_SAVE)
                bclr $flags $p1
                bclr $flags $p2
-               call ctx_xfer
+               call #ctx_xfer
                trace_clr(T_SAVE)
-               bra main_done
+               bra #main_done
 
        main_not_ctx_save:
                shl b32 $r15 $r14 16
                or $r15 E_BAD_COMMAND
-               call error
-               bra main
+               call #error
+               bra #main
 
        main_done:
        mov $r1 0x820
@@ -410,7 +455,7 @@ main:
        clear b32 $r2
        bset $r2 31
        iowr I[$r1 + 0x000] $r2         // CC_SCRATCH[0] |= 0x80000000
-       bra main
+       bra #main
 
 // interrupt handler
 ih:
@@ -427,13 +472,13 @@ ih:
        // incoming fifo command?
        iord $r10 I[$r0 + 0x200]        // INTR
        and $r11 $r10 0x00000004
-       bra e ih_no_fifo
+       bra e #ih_no_fifo
                // queue incoming fifo command for later processing
                mov $r11 0x1900
-               mov $r13 cmd_queue
+               mov $r13 #cmd_queue
                iord $r14 I[$r11 + 0x100]       // FIFO_CMD
                iord $r15 I[$r11 + 0x000]       // FIFO_DATA
-               call queue_put
+               call #queue_put
                add b32 $r11 0x400
                mov $r14 1
                iowr I[$r11 + 0x000] $r14       // FIFO_ACK
@@ -441,18 +486,18 @@ ih:
        // context switch request?
        ih_no_fifo:
        and $r11 $r10 0x00000100
-       bra e ih_no_ctxsw
+       bra e #ih_no_ctxsw
                // enqueue a context switch for later processing
-               mov $r13 cmd_queue
+               mov $r13 #cmd_queue
                mov $r14 0x4001
-               call queue_put
+               call #queue_put
 
        // anything we didn't handle, bring it to the host's attention
        ih_no_ctxsw:
        mov $r11 0x104
        not b32 $r11
        and $r11 $r10 $r11
-       bra e ih_no_other
+       bra e #ih_no_other
                mov $r10 0xc1c
                shl b32 $r10 6
                iowr I[$r10] $r11       // INTR_UP_SET
@@ -478,11 +523,11 @@ ctx_4160s:
        mov $r14 0x4160
        sethi $r14 0x400000
        mov $r15 1
-       call nv_wr32
+       call #nv_wr32
        ctx_4160s_wait:
-               call nv_rd32
+               call #nv_rd32
                xbit $r15 $r15 4
-               bra e ctx_4160s_wait
+               bra e #ctx_4160s_wait
        ret
 
 // Without clearing again at end of xfer, some things cause PGRAPH
@@ -492,7 +537,7 @@ ctx_4160c:
        mov $r14 0x4160
        sethi $r14 0x400000
        clear b32 $r15
-       call nv_wr32
+       call #nv_wr32
        ret
 
 // Again, not real sure
@@ -503,7 +548,7 @@ ctx_4170s:
        mov $r14 0x4170
        sethi $r14 0x400000
        or $r15 0x10
-       call nv_wr32
+       call #nv_wr32
        ret
 
 // Waits for a ctx_4170s() call to complete
@@ -511,9 +556,9 @@ ctx_4170s:
 ctx_4170w:
        mov $r14 0x4170
        sethi $r14 0x400000
-       call nv_rd32
+       call #nv_rd32
        and $r15 0x10
-       bra ne ctx_4170w
+       bra ne #ctx_4170w
        ret
 
 // Disables various things, waits a bit, and re-enables them..
@@ -530,7 +575,7 @@ ctx_redswitch:
        mov $r15 8
        ctx_redswitch_delay:
                sub b32 $r15 1
-               bra ne ctx_redswitch_delay
+               bra ne #ctx_redswitch_delay
        mov $r15 0x770
        iowr I[$r14] $r15       // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
        ret
@@ -546,10 +591,10 @@ ctx_86c:
        iowr I[$r14] $r15       // HUB(0x86c) = val
        mov $r14 -0x75ec
        sethi $r14 0x400000
-       call nv_wr32            // ROP(0xa14) = val
+       call #nv_wr32           // ROP(0xa14) = val
        mov $r14 -0x5794
        sethi $r14 0x410000
-       call nv_wr32            // GPC(0x86c) = val
+       call #nv_wr32           // GPC(0x86c) = val
        ret
 
 // ctx_load - loads a channel's ctxctl data, and selects its vm
@@ -561,7 +606,7 @@ ctx_load:
 
        // switch to channel, somewhat magic in parts..
        mov $r10 12             // DONE_UNK12
-       call wait_donez
+       call #wait_donez
        mov $r1 0xa24
        shl b32 $r1 6
        iowr I[$r1 + 0x000] $r0 // 0x409a24
@@ -576,7 +621,7 @@ ctx_load:
        ctx_chan_wait_0:
                iord $r4 I[$r1 + 0x100]
                and $r4 0x1f
-               bra ne ctx_chan_wait_0
+               bra ne #ctx_chan_wait_0
        iowr I[$r3 + 0x000] $r2 // CHAN_CUR
 
        // load channel header, fetch PGRAPH context pointer
@@ -595,19 +640,19 @@ ctx_load:
        sethi $r2 0x80000000
        iowr I[$r1 + 0x000] $r2         // MEM_TARGET = vram
        mov $r1 0x10                    // chan + 0x0210
-       mov $r2 xfer_data
+       mov $r2 #xfer_data
        sethi $r2 0x00020000            // 16 bytes
        xdld $r1 $r2
        xdwait
        trace_clr(T_LCHAN)
 
        // update current context
-       ld b32 $r1 D[$r0 + xfer_data + 4]
+       ld b32 $r1 D[$r0 + #xfer_data + 4]
        shl b32 $r1 24
-       ld b32 $r2 D[$r0 + xfer_data + 0]
+       ld b32 $r2 D[$r0 + #xfer_data + 0]
        shr b32 $r2 8
        or $r1 $r2
-       st b32 D[$r0 + ctx_current] $r1
+       st b32 D[$r0 + #ctx_current] $r1
 
        // set transfer base to start of context, and fetch context header
        trace_set(T_LCTXH)
@@ -618,7 +663,7 @@ ctx_load:
        mov $r1 0xa20
        shl b32 $r1 6
        iowr I[$r1 + 0x000] $r2         // MEM_TARGET = vm
-       mov $r1 chan_data
+       mov $r1 #chan_data
        sethi $r1 0x00060000            // 256 bytes
        xdld $r0 $r1
        xdwait
@@ -635,10 +680,10 @@ ctx_load:
 // In: $r2 channel address
 //
 ctx_chan:
-       call ctx_4160s
-       call ctx_load
+       call #ctx_4160s
+       call #ctx_load
        mov $r10 12                     // DONE_UNK12
-       call wait_donez
+       call #wait_donez
        mov $r1 0xa10
        shl b32 $r1 6
        mov $r2 5
@@ -646,8 +691,8 @@ ctx_chan:
        ctx_chan_wait:
                iord $r2 I[$r1 + 0x000]
                or $r2 $r2
-               bra ne ctx_chan_wait
-       call ctx_4160c
+               bra ne #ctx_chan_wait
+       call #ctx_4160c
        ret
 
 // Execute per-context state overrides list
@@ -661,7 +706,7 @@ ctx_chan:
 //
 ctx_mmio_exec:
        // set transfer base to be the mmio list
-       ld b32 $r3 D[$r0 + chan_mmio_address]
+       ld b32 $r3 D[$r0 + #chan_mmio_address]
        mov $r2 0xa04
        shl b32 $r2 6
        iowr I[$r2 + 0x000] $r3         // MEM_BASE
@@ -670,31 +715,31 @@ ctx_mmio_exec:
        ctx_mmio_loop:
                // fetch next 256 bytes of mmio list if necessary
                and $r4 $r3 0xff
-               bra ne ctx_mmio_pull
-                       mov $r5 xfer_data
+               bra ne #ctx_mmio_pull
+                       mov $r5 #xfer_data
                        sethi $r5 0x00060000    // 256 bytes
                        xdld $r3 $r5
                        xdwait
 
                // execute a single list entry
                ctx_mmio_pull:
-               ld b32 $r14 D[$r4 + xfer_data + 0x00]
-               ld b32 $r15 D[$r4 + xfer_data + 0x04]
-               call nv_wr32
+               ld b32 $r14 D[$r4 + #xfer_data + 0x00]
+               ld b32 $r15 D[$r4 + #xfer_data + 0x04]
+               call #nv_wr32
 
                // next!
                add b32 $r3 8
                sub b32 $r1 1
-               bra ne ctx_mmio_loop
+               bra ne #ctx_mmio_loop
 
        // set transfer base back to the current context
        ctx_mmio_done:
-       ld b32 $r3 D[$r0 + ctx_current]
+       ld b32 $r3 D[$r0 + #ctx_current]
        iowr I[$r2 + 0x000] $r3         // MEM_BASE
 
        // disable the mmio list now, we don't need/want to execute it again
-       st b32 D[$r0 + chan_mmio_count] $r0
-       mov $r1 chan_data
+       st b32 D[$r0 + #chan_mmio_count] $r0
+       mov $r1 #chan_data
        sethi $r1 0x00060000            // 256 bytes
        xdst $r0 $r1
        xdwait
@@ -709,46 +754,46 @@ ctx_mmio_exec:
 //             on load it means: "a save preceded this load"
 //
 ctx_xfer:
-       bra not $p1 ctx_xfer_pre
-       bra $p2 ctx_xfer_pre_load
+       bra not $p1 #ctx_xfer_pre
+       bra $p2 #ctx_xfer_pre_load
        ctx_xfer_pre:
                mov $r15 0x10
-               call ctx_86c
-               call ctx_4160s
-               bra not $p1 ctx_xfer_exec
+               call #ctx_86c
+               call #ctx_4160s
+               bra not $p1 #ctx_xfer_exec
 
        ctx_xfer_pre_load:
                mov $r15 2
-               call ctx_4170s
-               call ctx_4170w
-               call ctx_redswitch
+               call #ctx_4170s
+               call #ctx_4170w
+               call #ctx_redswitch
                clear b32 $r15
-               call ctx_4170s
-               call ctx_load
+               call #ctx_4170s
+               call #ctx_load
 
        // fetch context pointer, and initiate xfer on all GPCs
        ctx_xfer_exec:
-       ld b32 $r1 D[$r0 + ctx_current]
+       ld b32 $r1 D[$r0 + #ctx_current]
        mov $r2 0x414
        shl b32 $r2 6
        iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
        mov $r14 -0x5b00
        sethi $r14 0x410000
        mov b32 $r15 $r1
-       call nv_wr32            // GPC_BCAST_WRCMD_DATA = ctx pointer
+       call #nv_wr32           // GPC_BCAST_WRCMD_DATA = ctx pointer
        add b32 $r14 4
        xbit $r15 $flags $p1
        xbit $r2 $flags $p2
        shl b32 $r2 1
        or $r15 $r2
-       call nv_wr32            // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+       call #nv_wr32           // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
 
        // strands
        mov $r1 0x4afc
        sethi $r1 0x20000
        mov $r2 0xc
        iowr I[$r1] $r2         // STRAND_CMD(0x3f) = 0x0c
-       call strand_wait
+       call #strand_wait
        mov $r2 0x47fc
        sethi $r2 0x20000
        iowr I[$r2] $r0         // STRAND_FIRST_GENE(0x3f) = 0x00
@@ -760,22 +805,22 @@ ctx_xfer:
        xbit $r10 $flags $p1    // direction
        or $r10 6               // first, last
        mov $r11 0              // base = 0
-       ld b32 $r12 D[$r0 + hub_mmio_list_head]
-       ld b32 $r13 D[$r0 + hub_mmio_list_tail]
+       ld b32 $r12 D[$r0 + #hub_mmio_list_head]
+       ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
        mov $r14 0              // not multi
-       call mmctx_xfer
+       call #mmctx_xfer
 
        // wait for GPCs to all complete
        mov $r10 8              // DONE_BAR
-       call wait_doneo
+       call #wait_doneo
 
        // wait for strand xfer to complete
-       call strand_wait
+       call #strand_wait
 
        // post-op
-       bra $p1 ctx_xfer_post
+       bra $p1 #ctx_xfer_post
                mov $r10 12             // DONE_UNK12
-               call wait_donez
+               call #wait_donez
                mov $r1 0xa10
                shl b32 $r1 6
                mov $r2 5
@@ -783,27 +828,27 @@ ctx_xfer:
                ctx_xfer_post_save_wait:
                        iord $r2 I[$r1]
                        or $r2 $r2
-                       bra ne ctx_xfer_post_save_wait
+                       bra ne #ctx_xfer_post_save_wait
 
-       bra $p2 ctx_xfer_done
+       bra $p2 #ctx_xfer_done
        ctx_xfer_post:
                mov $r15 2
-               call ctx_4170s
+               call #ctx_4170s
                clear b32 $r15
-               call ctx_86c
-               call strand_post
-               call ctx_4170w
+               call #ctx_86c
+               call #strand_post
+               call #ctx_4170w
                clear b32 $r15
-               call ctx_4170s
+               call #ctx_4170s
 
-               bra not $p1 ctx_xfer_no_post_mmio
-               ld b32 $r1 D[$r0 + chan_mmio_count]
+               bra not $p1 #ctx_xfer_no_post_mmio
+               ld b32 $r1 D[$r0 + #chan_mmio_count]
                or $r1 $r1
-               bra e ctx_xfer_no_post_mmio
-                       call ctx_mmio_exec
+               bra e #ctx_xfer_no_post_mmio
+                       call #ctx_mmio_exec
 
                ctx_xfer_no_post_mmio:
-               call ctx_4160c
+               call #ctx_4160c
 
        ctx_xfer_done:
        ret
index 241d326..c5ed307 100644 (file)
@@ -23,19 +23,21 @@ uint32_t nvc0_grhub_data[] = {
        0x00000000,
        0x00000000,
        0x000000c0,
-       0x01340098,
+       0x013c00a0,
        0x000000c1,
-       0x01380098,
+       0x014000a0,
        0x000000c3,
-       0x01340098,
+       0x013c00a0,
        0x000000c4,
-       0x01340098,
+       0x013c00a0,
        0x000000c8,
-       0x01340098,
+       0x013c00a0,
        0x000000ce,
-       0x01340098,
+       0x013c00a0,
        0x000000cf,
-       0x01340098,
+       0x013c00a0,
+       0x000000d9,
+       0x01dc0140,
        0x00000000,
        0x0417e91c,
        0x04400204,
@@ -77,47 +79,45 @@ uint32_t nvc0_grhub_data[] = {
        0x0c408900,
        0x00408980,
        0x044064c0,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x0417e91c,
+       0x04400204,
+       0x24404004,
+       0x00404044,
+       0x34404094,
+       0x184040d0,
+       0x004040f8,
+       0x08404130,
+       0x08404150,
+       0x04404164,
+       0x04404178,
+       0x1c404200,
+       0x34404404,
+       0x0c404460,
+       0x00404480,
+       0x00404498,
+       0x0c404604,
+       0x7c404618,
+       0x50404698,
+       0x044046f0,
+       0x54404700,
+       0x00405800,
+       0x08405830,
+       0x00405854,
+       0x0c405870,
+       0x04405a00,
+       0x00405a18,
+       0x00406020,
+       0x0c406028,
+       0x044064a8,
+       0x104064b4,
+       0x00407804,
+       0x1440780c,
+       0x004078bc,
+       0x18408000,
+       0x00408064,
+       0x08408800,
+       0x0c408900,
+       0x00408980,
        0x00000000,
        0x00000000,
        0x00000000,
index 929aded..e9992f6 100644 (file)
@@ -153,3 +153,240 @@ nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
        perflvl->vdec   = read_clk(dev, 0x0e);
        return 0;
 }
+
+struct nvc0_pm_clock {
+       u32 freq;
+       u32 ssel;
+       u32 mdiv;
+       u32 dsrc;
+       u32 ddiv;
+       u32 coef;
+};
+
+struct nvc0_pm_state {
+       struct nvc0_pm_clock eng[16];
+};
+
+static u32
+calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
+{
+       u32 div = min((ref * 2) / freq, (u32)65);
+       if (div < 2)
+               div = 2;
+
+       *ddiv = div - 2;
+       return (ref * 2) / div;
+}
+
+static u32
+calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+{
+       u32 sclk;
+
+       /* use one of the fixed frequencies if possible */
+       *ddiv = 0x00000000;
+       switch (freq) {
+       case  27000:
+       case 108000:
+               *dsrc = 0x00000000;
+               if (freq == 108000)
+                       *dsrc |= 0x00030000;
+               return freq;
+       case 100000:
+               *dsrc = 0x00000002;
+               return freq;
+       default:
+               *dsrc = 0x00000003;
+               break;
+       }
+
+       /* otherwise, calculate the closest divider */
+       sclk = read_vco(dev, clk);
+       if (clk < 7)
+               sclk = calc_div(dev, clk, sclk, freq, ddiv);
+       return sclk;
+}
+
+static u32
+calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
+{
+       struct pll_lims limits;
+       int N, M, P, ret;
+
+       ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits);
+       if (ret)
+               return 0;
+
+       limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
+       if (!limits.refclk)
+               return 0;
+
+       ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
+       if (ret <= 0)
+               return 0;
+
+       *coef = (P << 16) | (N << 8) | M;
+       return ret;
+}
+
+/* A (likely rather simplified and incomplete) view of the clock tree
+ *
+ * Key:
+ *
+ * S: source select
+ * D: divider
+ * P: pll
+ * F: switch
+ *
+ * Engine clocks:
+ *
+ * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
+ *                      (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
+ *
+ * Not all registers exist for all clocks.  For example: clocks >= 8 don't
+ * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
+ * they have the divider at 1371d0, though the source selection at 137160
+ * still exists.  You must use the divider at 137250 for these instead.
+ *
+ * Memory clock:
+ *
+ * TBD, read_mem() above is likely very wrong...
+ *
+ */
+
+static int
+calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
+{
+       u32 src0, div0, div1D, div1P = 0;
+       u32 clk0, clk1 = 0;
+
+       /* invalid clock domain */
+       if (!freq)
+               return 0;
+
+       /* first possible path, using only dividers */
+       clk0 = calc_src(dev, clk, freq, &src0, &div0);
+       clk0 = calc_div(dev, clk, clk0, freq, &div1D);
+
+       /* see if we can get any closer using PLLs */
+       if (clk0 != freq) {
+               if (clk < 7)
+                       clk1 = calc_pll(dev, clk, freq, &info->coef);
+               else
+                       clk1 = read_pll(dev, 0x1370e0);
+               clk1 = calc_div(dev, clk, clk1, freq, &div1P);
+       }
+
+       /* select the method which gets closest to target freq */
+       if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
+               info->dsrc = src0;
+               if (div0) {
+                       info->ddiv |= 0x80000000;
+                       info->ddiv |= div0 << 8;
+                       info->ddiv |= div0;
+               }
+               if (div1D) {
+                       info->mdiv |= 0x80000000;
+                       info->mdiv |= div1D;
+               }
+               info->ssel = 0;
+               info->freq = clk0;
+       } else {
+               if (div1P) {
+                       info->mdiv |= 0x80000000;
+                       info->mdiv |= div1P << 8;
+               }
+               info->ssel = (1 << clk);
+               info->freq = clk1;
+       }
+
+       return 0;
+}
+
+void *
+nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nvc0_pm_state *info;
+       int ret;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return ERR_PTR(-ENOMEM);
+
+       /* No idea why this is still in the performance table; the ROPCs
+        * appear to get their clock from clock 2 ("hub07", actually hub05
+        * on this chip) as well.  nvatiming confirms hub05 and ROP always
+        * run at the same frequency with the binary driver, even when the
+        * performance table says they should differ.
+        */
+       if (dev_priv->chipset == 0xd9)
+               perflvl->rop = 0;
+
+       if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
+           (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
+           (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
+           (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
+           (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
+           (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
+           (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
+           (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
+               kfree(info);
+               return ERR_PTR(ret);
+       }
+
+       return info;
+}
+
+static void
+prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
+{
+       /* program dividers at 137160/1371d0 first */
+       if (clk < 7 && !info->ssel) {
+               nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+               nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc);
+       }
+
+       /* switch clock to non-pll mode */
+       nv_mask(dev, 0x137100, (1 << clk), 0x00000000);
+       nv_wait(dev, 0x137100, (1 << clk), 0x00000000);
+
+       /* reprogram pll */
+       if (clk < 7) {
+               /* make sure it's disabled first... */
+               u32 base = 0x137000 + (clk * 0x20);
+               u32 ctrl = nv_rd32(dev, base + 0x00);
+               if (ctrl & 0x00000001) {
+                       nv_mask(dev, base + 0x00, 0x00000004, 0x00000000);
+                       nv_mask(dev, base + 0x00, 0x00000001, 0x00000000);
+               }
+               /* program it to new values, if necessary */
+               if (info->ssel) {
+                       nv_wr32(dev, base + 0x04, info->coef);
+                       nv_mask(dev, base + 0x00, 0x00000001, 0x00000001);
+                       nv_wait(dev, base + 0x00, 0x00020000, 0x00020000);
+                       nv_mask(dev, base + 0x00, 0x00020004, 0x00000004);
+               }
+       }
+
+       /* select pll/non-pll mode, and program final clock divider */
+       nv_mask(dev, 0x137100, (1 << clk), info->ssel);
+       nv_wait(dev, 0x137100, (1 << clk), info->ssel);
+       nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+}
+
+int
+nvc0_pm_clocks_set(struct drm_device *dev, void *data)
+{
+       struct nvc0_pm_state *info = data;
+       int i;
+
+       for (i = 0; i < 16; i++) {
+               if (!info->eng[i].freq)
+                       continue;
+               prog_clk(dev, i, &info->eng[i]);
+       }
+
+       kfree(info);
+       return 0;
+}
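
For reference, the divider path above always rounds the divider down (so the resulting clock lands at or above the target) and encodes it as (div - 2). A minimal, self-contained sketch of that rounding, mirroring calc_div() only; pick_div() and the example frequencies are illustrative, not part of the driver:

#include <stdio.h>

/* Mirrors the calc_div() rounding above: a ref*2 based divider, clamped to
 * the 2..65 range and encoded as (div - 2).  All frequencies in kHz. */
static unsigned int pick_div(unsigned int ref, unsigned int target,
                             unsigned int *enc)
{
        unsigned int div = (ref * 2) / target;

        if (div > 65)
                div = 65;
        if (div < 2)
                div = 2;

        *enc = div - 2;
        return (ref * 2) / div;
}

int main(void)
{
        /* e.g. a 1620 MHz VCO divided down to a 405 MHz target: div = 8,
         * encoded as 6, and the target is hit exactly. */
        unsigned int enc;
        unsigned int out = pick_div(1620000, 405000, &enc);

        printf("out = %u kHz, ddiv = 0x%02x\n", out, enc);
        return 0;
}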
index cb006a7..d2ba2f0 100644 (file)
 #include "nouveau_fb.h"
 #include "nv50_display.h"
 
+#define EVO_DMA_NR 9
+
+#define EVO_MASTER  (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY     EVO_SYNC(  0, 0x00)
+#define EVO_FLIP_SEM0(c)  EVO_SYNC((c), 0x00)
+#define EVO_FLIP_SEM1(c)  EVO_SYNC((c), 0x10)
+
+struct evo {
+       int idx;
+       dma_addr_t handle;
+       u32 *ptr;
+       struct {
+               u32 offset;
+               u16 value;
+       } sem;
+};
+
 struct nvd0_display {
        struct nouveau_gpuobj *mem;
-       struct {
-               dma_addr_t handle;
-               u32 *ptr;
-       } evo[1];
+       struct nouveau_bo *sync;
+       struct evo evo[9];
 
        struct tasklet_struct tasklet;
        u32 modeset;
@@ -53,6 +75,15 @@ nvd0_display(struct drm_device *dev)
        return dev_priv->engine.display.priv;
 }
 
+static struct drm_crtc *
+nvd0_display_crtc_get(struct drm_encoder *encoder)
+{
+       return nouveau_encoder(encoder)->crtc;
+}
+
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
 static inline int
 evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
 {
@@ -84,6 +115,9 @@ evo_wait(struct drm_device *dev, int id, int nr)
                put = 0;
        }
 
+       if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+               NV_INFO(dev, "Evo%d: %p START\n", id, disp->evo[id].ptr + put);
+
        return disp->evo[id].ptr + put;
 }
 
@@ -91,40 +125,264 @@ static void
 evo_kick(u32 *push, struct drm_device *dev, int id)
 {
        struct nvd0_display *disp = nvd0_display(dev);
+
+       if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) {
+               u32 curp = nv_rd32(dev, 0x640000 + (id * 0x1000)) >> 2;
+               u32 *cur = disp->evo[id].ptr + curp;
+
+               while (cur < push)
+                       NV_INFO(dev, "Evo%d: 0x%08x\n", id, *cur++);
+               NV_INFO(dev, "Evo%d: %p KICK!\n", id, push);
+       }
+
        nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
 }
 
 #define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
 #define evo_data(p,d)   *((p)++) = (d)
 
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
+static int
+evo_init_dma(struct drm_device *dev, int ch)
 {
-       return nouveau_encoder(encoder)->crtc;
+       struct nvd0_display *disp = nvd0_display(dev);
+       u32 flags;
+
+       flags = 0x00000000;
+       if (ch == EVO_MASTER)
+               flags |= 0x01000000;
+
+       nv_wr32(dev, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
+       nv_wr32(dev, 0x610498 + (ch * 0x0010), 0x00010000);
+       nv_wr32(dev, 0x61049c + (ch * 0x0010), 0x00000001);
+       nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+       nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000);
+       nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
+       if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
+               NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
+                             nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+               return -EBUSY;
+       }
+
+       nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
+       nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+       return 0;
+}
+
+static void
+evo_fini_dma(struct drm_device *dev, int ch)
+{
+       if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000010))
+               return;
+
+       nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
+       nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
+       nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
+       nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
+       nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+}
+
+static inline void
+evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
+{
+       nv_wr32(dev, 0x640000 + (ch * 0x1000) + mthd, data);
+}
+
+static int
+evo_init_pio(struct drm_device *dev, int ch)
+{
+       nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000001);
+       if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
+               NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
+                             nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+               return -EBUSY;
+       }
+
+       nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
+       nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+       return 0;
+}
+
+static void
+evo_fini_pio(struct drm_device *dev, int ch)
+{
+       if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000001))
+               return;
+
+       nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+       nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
+       nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
+       nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
+       nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+}
+
+static bool
+evo_sync_wait(void *data)
+{
+       return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
+}
+
+static int
+evo_sync(struct drm_device *dev, int ch)
+{
+       struct nvd0_display *disp = nvd0_display(dev);
+       u32 *push = evo_wait(dev, ch, 8);
+       if (push) {
+               nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+               evo_mthd(push, 0x0084, 1);
+               evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+               evo_mthd(push, 0x0080, 2);
+               evo_data(push, 0x00000000);
+               evo_data(push, 0x00000000);
+               evo_kick(push, dev, ch);
+               if (nv_wait_cb(dev, evo_sync_wait, disp->sync))
+                       return 0;
+       }
+
+       return -EBUSY;
+}
+
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
+struct nouveau_bo *
+nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
+{
+       return nvd0_display(dev)->sync;
+}
+
+void
+nvd0_display_flip_stop(struct drm_crtc *crtc)
+{
+       struct nvd0_display *disp = nvd0_display(crtc->dev);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
+       u32 *push;
+
+       push = evo_wait(crtc->dev, evo->idx, 8);
+       if (push) {
+               evo_mthd(push, 0x0084, 1);
+               evo_data(push, 0x00000000);
+               evo_mthd(push, 0x0094, 1);
+               evo_data(push, 0x00000000);
+               evo_mthd(push, 0x00c0, 1);
+               evo_data(push, 0x00000000);
+               evo_mthd(push, 0x0080, 1);
+               evo_data(push, 0x00000000);
+               evo_kick(push, crtc->dev, evo->idx);
+       }
+}
+
+int
+nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+                      struct nouveau_channel *chan, u32 swap_interval)
+{
+       struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+       struct nvd0_display *disp = nvd0_display(crtc->dev);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
+       u64 offset;
+       u32 *push;
+       int ret;
+
+       swap_interval <<= 4;
+       if (swap_interval == 0)
+               swap_interval |= 0x100;
+
+       push = evo_wait(crtc->dev, evo->idx, 128);
+       if (unlikely(push == NULL))
+               return -EBUSY;
+
+       /* synchronise with the rendering channel, if necessary */
+       if (likely(chan)) {
+               ret = RING_SPACE(chan, 10);
+               if (ret)
+                       return ret;
+
+               offset  = chan->dispc_vma[nv_crtc->index].offset;
+               offset += evo->sem.offset;
+
+               BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+               OUT_RING  (chan, upper_32_bits(offset));
+               OUT_RING  (chan, lower_32_bits(offset));
+               OUT_RING  (chan, 0xf00d0000 | evo->sem.value);
+               OUT_RING  (chan, 0x1002);
+               BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+               OUT_RING  (chan, upper_32_bits(offset));
+               OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
+               OUT_RING  (chan, 0x74b1e000);
+               OUT_RING  (chan, 0x1001);
+               FIRE_RING (chan);
+       } else {
+               nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
+                               0xf00d0000 | evo->sem.value);
+               evo_sync(crtc->dev, EVO_MASTER);
+       }
+
+       /* queue the flip */
+       evo_mthd(push, 0x0100, 1);
+       evo_data(push, 0xfffe0000);
+       evo_mthd(push, 0x0084, 1);
+       evo_data(push, swap_interval);
+       if (!(swap_interval & 0x00000100)) {
+               evo_mthd(push, 0x00e0, 1);
+               evo_data(push, 0x40000000);
+       }
+       evo_mthd(push, 0x0088, 4);
+       evo_data(push, evo->sem.offset);
+       evo_data(push, 0xf00d0000 | evo->sem.value);
+       evo_data(push, 0x74b1e000);
+       evo_data(push, NvEvoSync);
+       evo_mthd(push, 0x00a0, 2);
+       evo_data(push, 0x00000000);
+       evo_data(push, 0x00000000);
+       evo_mthd(push, 0x00c0, 1);
+       evo_data(push, nv_fb->r_dma);
+       evo_mthd(push, 0x0110, 2);
+       evo_data(push, 0x00000000);
+       evo_data(push, 0x00000000);
+       evo_mthd(push, 0x0400, 5);
+       evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+       evo_data(push, 0);
+       evo_data(push, (fb->height << 16) | fb->width);
+       evo_data(push, nv_fb->r_pitch);
+       evo_data(push, nv_fb->r_format);
+       evo_mthd(push, 0x0080, 1);
+       evo_data(push, 0x00000000);
+       evo_kick(push, crtc->dev, evo->idx);
+
+       evo->sem.offset ^= 0x10;
+       evo->sem.value++;
+       return 0;
 }
 
 /******************************************************************************
  * CRTC
  *****************************************************************************/
 static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 {
        struct drm_device *dev = nv_crtc->base.dev;
-       u32 *push, mode;
+       struct nouveau_connector *nv_connector;
+       struct drm_connector *connector;
+       u32 *push, mode = 0x00;
 
-       mode = 0x00000000;
-       if (on) {
-               /* 0x11: 6bpc dynamic 2x2
-                * 0x13: 8bpc dynamic 2x2
-                * 0x19: 6bpc static 2x2
-                * 0x1b: 8bpc static 2x2
-                * 0x21: 6bpc temporal
-                * 0x23: 8bpc temporal
-                */
-               mode = 0x00000011;
+       nv_connector = nouveau_crtc_connector_get(nv_crtc);
+       connector = &nv_connector->base;
+       if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+               if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+                       mode = DITHERING_MODE_DYNAMIC2X2;
+       } else {
+               mode = nv_connector->dithering_mode;
+       }
+
+       if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+               if (connector->display_info.bpc >= 8)
+                       mode |= DITHERING_DEPTH_8BPC;
+       } else {
+               mode |= nv_connector->dithering_depth;
        }
 
-       push = evo_wait(dev, 0, 4);
+       push = evo_wait(dev, EVO_MASTER, 4);
        if (push) {
                evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
                evo_data(push, mode);
@@ -132,63 +390,98 @@ nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
                }
-               evo_kick(push, dev, 0);
+               evo_kick(push, dev, EVO_MASTER);
        }
 
        return 0;
 }
 
 static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, int type, bool update)
+nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
 {
-       struct drm_display_mode *mode = &nv_crtc->base.mode;
+       struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
        struct drm_device *dev = nv_crtc->base.dev;
+       struct drm_crtc *crtc = &nv_crtc->base;
        struct nouveau_connector *nv_connector;
-       u32 *push, outX, outY;
-
-       outX = mode->hdisplay;
-       outY = mode->vdisplay;
+       int mode = DRM_MODE_SCALE_NONE;
+       u32 oX, oY, *push;
 
+       /* start off at the resolution we programmed the crtc for; this
+        * effectively handles NONE/FULL scaling
+        */
        nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       if (nv_connector && nv_connector->native_mode) {
-               struct drm_display_mode *native = nv_connector->native_mode;
-               u32 xratio = (native->hdisplay << 19) / mode->hdisplay;
-               u32 yratio = (native->vdisplay << 19) / mode->vdisplay;
-
-               switch (type) {
-               case DRM_MODE_SCALE_ASPECT:
-                       if (xratio > yratio) {
-                               outX = (mode->hdisplay * yratio) >> 19;
-                               outY = (mode->vdisplay * yratio) >> 19;
-                       } else {
-                               outX = (mode->hdisplay * xratio) >> 19;
-                               outY = (mode->vdisplay * xratio) >> 19;
-                       }
-                       break;
-               case DRM_MODE_SCALE_FULLSCREEN:
-                       outX = native->hdisplay;
-                       outY = native->vdisplay;
-                       break;
-               default:
-                       break;
+       if (nv_connector && nv_connector->native_mode)
+               mode = nv_connector->scaling_mode;
+
+       if (mode != DRM_MODE_SCALE_NONE)
+               omode = nv_connector->native_mode;
+       else
+               omode = umode;
+
+       oX = omode->hdisplay;
+       oY = omode->vdisplay;
+       if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+               oY *= 2;
+
+       /* add overscan compensation if necessary; this keeps the aspect
+        * ratio the same as the backend mode unless overridden by the
+        * user setting both hborder and vborder properties.
+        */
+       if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+                            (nv_connector->underscan == UNDERSCAN_AUTO &&
+                             nv_connector->edid &&
+                             drm_detect_hdmi_monitor(nv_connector->edid)))) {
+               u32 bX = nv_connector->underscan_hborder;
+               u32 bY = nv_connector->underscan_vborder;
+               u32 aspect = (oY << 19) / oX;
+
+               if (bX) {
+                       oX -= (bX * 2);
+                       if (bY) oY -= (bY * 2);
+                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+               } else {
+                       oX -= (oX >> 4) + 32;
+                       if (bY) oY -= (bY * 2);
+                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+               }
+       }
+
+       /* handle CENTER/ASPECT scaling, taking into account the areas
+        * removed already for overscan compensation
+        */
+       switch (mode) {
+       case DRM_MODE_SCALE_CENTER:
+               oX = min((u32)umode->hdisplay, oX);
+               oY = min((u32)umode->vdisplay, oY);
+               /* fall-through */
+       case DRM_MODE_SCALE_ASPECT:
+               if (oY < oX) {
+                       u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+                       oX = ((oY * aspect) + (aspect / 2)) >> 19;
+               } else {
+                       u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+                       oY = ((oX * aspect) + (aspect / 2)) >> 19;
                }
+               break;
+       default:
+               break;
        }
 
-       push = evo_wait(dev, 0, 16);
+       push = evo_wait(dev, EVO_MASTER, 8);
        if (push) {
                evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
-               evo_data(push, (outY << 16) | outX);
-               evo_data(push, (outY << 16) | outX);
-               evo_data(push, (outY << 16) | outX);
+               evo_data(push, (oY << 16) | oX);
+               evo_data(push, (oY << 16) | oX);
+               evo_data(push, (oY << 16) | oX);
                evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x00000000);
                evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
+               evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
+               evo_kick(push, dev, EVO_MASTER);
                if (update) {
-                       evo_mthd(push, 0x0080, 1);
-                       evo_data(push, 0x00000000);
+                       nvd0_display_flip_stop(crtc);
+                       nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
                }
-               evo_kick(push, dev, 0);
        }
 
        return 0;
@@ -201,7 +494,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
        struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
        u32 *push;
 
-       push = evo_wait(fb->dev, 0, 16);
+       push = evo_wait(fb->dev, EVO_MASTER, 16);
        if (push) {
                evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
                evo_data(push, nvfb->nvbo->bo.offset >> 8);
@@ -216,7 +509,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
                }
-               evo_kick(push, fb->dev, 0);
+               evo_kick(push, fb->dev, EVO_MASTER);
        }
 
        nv_crtc->fb.tile_flags = nvfb->r_dma;
@@ -227,7 +520,7 @@ static void
 nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
 {
        struct drm_device *dev = nv_crtc->base.dev;
-       u32 *push = evo_wait(dev, 0, 16);
+       u32 *push = evo_wait(dev, EVO_MASTER, 16);
        if (push) {
                if (show) {
                        evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
@@ -247,7 +540,7 @@ nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
                        evo_data(push, 0x00000000);
                }
 
-               evo_kick(push, dev, 0);
+               evo_kick(push, dev, EVO_MASTER);
        }
 }
 
@@ -262,7 +555,9 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        u32 *push;
 
-       push = evo_wait(crtc->dev, 0, 2);
+       nvd0_display_flip_stop(crtc);
+
+       push = evo_wait(crtc->dev, EVO_MASTER, 2);
        if (push) {
                evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x00000000);
@@ -270,7 +565,7 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
                evo_data(push, 0x03000000);
                evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x00000000);
-               evo_kick(push, crtc->dev, 0);
+               evo_kick(push, crtc->dev, EVO_MASTER);
        }
 
        nvd0_crtc_cursor_show(nv_crtc, false, false);
@@ -282,7 +577,7 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        u32 *push;
 
-       push = evo_wait(crtc->dev, 0, 32);
+       push = evo_wait(crtc->dev, EVO_MASTER, 32);
        if (push) {
                evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
                evo_data(push, nv_crtc->fb.tile_flags);
@@ -295,10 +590,11 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
                evo_data(push, NvEvoVRAM);
                evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0xffffff00);
-               evo_kick(push, crtc->dev, 0);
+               evo_kick(push, crtc->dev, EVO_MASTER);
        }
 
-       nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
+       nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, false);
+       nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
 }
 
 static bool
@@ -333,21 +629,35 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
 {
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        struct nouveau_connector *nv_connector;
-       u32 htotal = mode->htotal;
-       u32 vtotal = mode->vtotal;
-       u32 hsyncw = mode->hsync_end - mode->hsync_start - 1;
-       u32 vsyncw = mode->vsync_end - mode->vsync_start - 1;
-       u32 hfrntp = mode->hsync_start - mode->hdisplay;
-       u32 vfrntp = mode->vsync_start - mode->vdisplay;
-       u32 hbackp = mode->htotal - mode->hsync_end;
-       u32 vbackp = mode->vtotal - mode->vsync_end;
-       u32 hss2be = hsyncw + hbackp;
-       u32 vss2be = vsyncw + vbackp;
-       u32 hss2de = htotal - hfrntp;
-       u32 vss2de = vtotal - vfrntp;
+       u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+       u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+       u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+       u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+       u32 vblan2e = 0, vblan2s = 1;
+       u32 magic = 0x31ec6000;
        u32 syncs, *push;
        int ret;
 
+       hactive = mode->htotal;
+       hsynce  = mode->hsync_end - mode->hsync_start - 1;
+       hbackp  = mode->htotal - mode->hsync_end;
+       hblanke = hsynce + hbackp;
+       hfrontp = mode->hsync_start - mode->hdisplay;
+       hblanks = mode->htotal - hfrontp - 1;
+
+       vactive = mode->vtotal * vscan / ilace;
+       vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+       vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+       vblanke = vsynce + vbackp;
+       vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+       vblanks = vactive - vfrontp - 1;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               vblan2e = vactive + vsynce + vbackp;
+               vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+               vactive = (vactive * 2) + 1;
+               magic  |= 0x00000001;
+       }
+
        syncs = 0x00000001;
        if (mode->flags & DRM_MODE_FLAG_NHSYNC)
                syncs |= 0x00000008;
@@ -358,28 +668,33 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
        if (ret)
                return ret;
 
-       push = evo_wait(crtc->dev, 0, 64);
+       push = evo_wait(crtc->dev, EVO_MASTER, 64);
        if (push) {
-               evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 5);
+               evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
                evo_data(push, 0x00000000);
-               evo_data(push, (vtotal << 16) | htotal);
-               evo_data(push, (vsyncw << 16) | hsyncw);
-               evo_data(push, (vss2be << 16) | hss2be);
-               evo_data(push, (vss2de << 16) | hss2de);
+               evo_data(push, (vactive << 16) | hactive);
+               evo_data(push, ( vsynce << 16) | hsynce);
+               evo_data(push, (vblanke << 16) | hblanke);
+               evo_data(push, (vblanks << 16) | hblanks);
+               evo_data(push, (vblan2e << 16) | vblan2s);
                evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x00000000); /* ??? */
                evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
                evo_data(push, mode->clock * 1000);
                evo_data(push, 0x00200000); /* ??? */
                evo_data(push, mode->clock * 1000);
-               evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 1);
+               evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
                evo_data(push, syncs);
-               evo_kick(push, crtc->dev, 0);
+               evo_data(push, magic);
+               evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+               evo_data(push, 0x00000311);
+               evo_data(push, 0x00000100);
+               evo_kick(push, crtc->dev, EVO_MASTER);
        }
 
        nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       nvd0_crtc_set_dither(nv_crtc, nv_connector->use_dithering, false);
-       nvd0_crtc_set_scale(nv_crtc, nv_connector->scaling_mode, false);
+       nvd0_crtc_set_dither(nv_crtc, false);
+       nvd0_crtc_set_scale(nv_crtc, false);
        nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
        return 0;
 }
@@ -400,7 +715,9 @@ nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        if (ret)
                return ret;
 
+       nvd0_display_flip_stop(crtc);
        nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+       nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
        return 0;
 }
 
@@ -410,6 +727,7 @@ nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
                               enum mode_set_atomic state)
 {
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       nvd0_display_flip_stop(crtc);
        nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
        return 0;
 }
@@ -472,10 +790,10 @@ static int
 nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       const u32 data = (y << 16) | x;
+       int ch = EVO_CURS(nv_crtc->index);
 
-       nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
-       nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
+       evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x);
+       evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
        return 0;
 }
 
@@ -525,6 +843,7 @@ static const struct drm_crtc_funcs nvd0_crtc_func = {
        .gamma_set = nvd0_crtc_gamma_set,
        .set_config = drm_crtc_helper_set_config,
        .destroy = nvd0_crtc_destroy,
+       .page_flip = nouveau_crtc_page_flip,
 };
 
 static void
@@ -659,12 +978,12 @@ nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
        nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
 
-       push = evo_wait(encoder->dev, 0, 4);
+       push = evo_wait(encoder->dev, EVO_MASTER, 4);
        if (push) {
                evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2);
                evo_data(push, 1 << nv_crtc->index);
                evo_data(push, 0x00ff);
-               evo_kick(push, encoder->dev, 0);
+               evo_kick(push, encoder->dev, EVO_MASTER);
        }
 
        nv_encoder->crtc = encoder->crtc;
@@ -680,13 +999,13 @@ nvd0_dac_disconnect(struct drm_encoder *encoder)
        if (nv_encoder->crtc) {
                nvd0_crtc_prepare(nv_encoder->crtc);
 
-               push = evo_wait(dev, 0, 4);
+               push = evo_wait(dev, EVO_MASTER, 4);
                if (push) {
                        evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
                        evo_data(push, 0x00000000);
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
-                       evo_kick(push, dev, 0);
+                       evo_kick(push, dev, EVO_MASTER);
                }
 
                nv_encoder->crtc = NULL;
@@ -760,6 +1079,108 @@ nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
 }
 
 /******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_connector *nv_connector;
+       struct drm_device *dev = encoder->dev;
+       int i, or = nv_encoder->or * 0x30;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (!drm_detect_monitor_audio(nv_connector->edid))
+               return;
+
+       nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000001);
+
+       drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+       if (nv_connector->base.eld[0]) {
+               u8 *eld = nv_connector->base.eld;
+
+               for (i = 0; i < eld[2] * 4; i++)
+                       nv_wr32(dev, 0x10ec00 + or, (i << 8) | eld[i]);
+               for (i = eld[2] * 4; i < 0x60; i++)
+                       nv_wr32(dev, 0x10ec00 + or, (i << 8) | 0x00);
+
+               nv_mask(dev, 0x10ec10 + or, 0x80000002, 0x80000002);
+       }
+}
+
+static void
+nvd0_audio_disconnect(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct drm_device *dev = encoder->dev;
+       int or = nv_encoder->or * 0x30;
+
+       nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000000);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+       struct nouveau_connector *nv_connector;
+       struct drm_device *dev = encoder->dev;
+       int head = nv_crtc->index * 0x800;
+       u32 rekey = 56; /* binary driver, and tegra constant */
+       u32 max_ac_packet;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (!drm_detect_hdmi_monitor(nv_connector->edid))
+               return;
+
+       max_ac_packet  = mode->htotal - mode->hdisplay;
+       max_ac_packet -= rekey;
+       max_ac_packet -= 18; /* constant from tegra */
+       max_ac_packet /= 32;
+
+       /* AVI InfoFrame */
+       nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
+       nv_wr32(dev, 0x61671c + head, 0x000d0282);
+       nv_wr32(dev, 0x616720 + head, 0x0000006f);
+       nv_wr32(dev, 0x616724 + head, 0x00000000);
+       nv_wr32(dev, 0x616728 + head, 0x00000000);
+       nv_wr32(dev, 0x61672c + head, 0x00000000);
+       nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000001);
+
+       /* ??? InfoFrame? */
+       nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
+       nv_wr32(dev, 0x6167ac + head, 0x00000010);
+       nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000001);
+
+       /* HDMI_CTRL */
+       nv_mask(dev, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
+                                                 max_ac_packet << 16);
+
+       /* No idea what this does, but audio doesn't work without it. */
+       nv_mask(dev, 0x616548 + head, 0x00000070, 0x00000000);
+
+       nvd0_audio_mode_set(encoder, mode);
+}
+
+static void
+nvd0_hdmi_disconnect(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+       struct drm_device *dev = encoder->dev;
+       int head = nv_crtc->index * 0x800;
+
+       nvd0_audio_disconnect(encoder);
+
+       nv_mask(dev, 0x616798 + head, 0x40000000, 0x00000000);
+       nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
+       nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
+}
+
+/******************************************************************************
  * SOR
  *****************************************************************************/
 static void
@@ -829,7 +1250,8 @@ static void
 nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
                  struct drm_display_mode *mode)
 {
-       struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+       struct drm_device *dev = encoder->dev;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
        struct nouveau_connector *nv_connector;
@@ -852,6 +1274,8 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
                or_config = (mode_ctrl & 0x00000f00) >> 8;
                if (mode->clock >= 165000)
                        or_config |= 0x0100;
+
+               nvd0_hdmi_mode_set(encoder, mode);
                break;
        case OUTPUT_LVDS:
                or_config = (mode_ctrl & 0x00000f00) >> 8;
@@ -861,7 +1285,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
                        if (bios->fp.if_is_24bit)
                                or_config |= 0x0200;
                } else {
-                       if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
+                       if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
                                if (((u8 *)nv_connector->edid)[121] == 2)
                                        or_config |= 0x0100;
                        } else
@@ -889,12 +1313,12 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 
        nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
 
-       push = evo_wait(encoder->dev, 0, 4);
+       push = evo_wait(dev, EVO_MASTER, 4);
        if (push) {
                evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2);
                evo_data(push, mode_ctrl);
                evo_data(push, or_config);
-               evo_kick(push, encoder->dev, 0);
+               evo_kick(push, dev, EVO_MASTER);
        }
 
        nv_encoder->crtc = encoder->crtc;
@@ -910,15 +1334,17 @@ nvd0_sor_disconnect(struct drm_encoder *encoder)
        if (nv_encoder->crtc) {
                nvd0_crtc_prepare(nv_encoder->crtc);
 
-               push = evo_wait(dev, 0, 4);
+               push = evo_wait(dev, EVO_MASTER, 4);
                if (push) {
                        evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
                        evo_data(push, 0x00000000);
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
-                       evo_kick(push, dev, 0);
+                       evo_kick(push, dev, EVO_MASTER);
                }
 
+               nvd0_hdmi_disconnect(encoder);
+
                nv_encoder->crtc = NULL;
                nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
        }
@@ -1159,6 +1585,12 @@ nvd0_display_intr(struct drm_device *dev)
        struct nvd0_display *disp = nvd0_display(dev);
        u32 intr = nv_rd32(dev, 0x610088);
 
+       if (intr & 0x00000001) {
+               u32 stat = nv_rd32(dev, 0x61008c);
+               nv_wr32(dev, 0x61008c, stat);
+               intr &= ~0x00000001;
+       }
+
        if (intr & 0x00000002) {
                u32 stat = nv_rd32(dev, 0x61009c);
                int chid = ffs(stat) - 1;
@@ -1215,38 +1647,29 @@ nvd0_display_intr(struct drm_device *dev)
 /******************************************************************************
  * Init
  *****************************************************************************/
-static void
+void
 nvd0_display_fini(struct drm_device *dev)
 {
        int i;
 
-       /* fini cursors */
-       for (i = 14; i >= 13; i--) {
-               if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
-                       continue;
-
-               nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
-               nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
-               nv_mask(dev, 0x610090, 1 << i, 0x00000000);
-               nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
+       /* fini cursors + overlays + flips */
+       for (i = 1; i >= 0; i--) {
+               evo_fini_pio(dev, EVO_CURS(i));
+               evo_fini_pio(dev, EVO_OIMM(i));
+               evo_fini_dma(dev, EVO_OVLY(i));
+               evo_fini_dma(dev, EVO_FLIP(i));
        }
 
        /* fini master */
-       if (nv_rd32(dev, 0x610490) & 0x00000010) {
-               nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
-               nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
-               nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
-               nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
-               nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
-       }
+       evo_fini_dma(dev, EVO_MASTER);
 }
 
 int
 nvd0_display_init(struct drm_device *dev)
 {
        struct nvd0_display *disp = nvd0_display(dev);
+       int ret, i;
        u32 *push;
-       int i;
 
        if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
                nv_wr32(dev, 0x6100ac, 0x00000100);
@@ -1271,7 +1694,7 @@ nvd0_display_init(struct drm_device *dev)
                nv_wr32(dev, 0x6301c4 + (i * 0x800), sor);
        }
 
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < dev->mode_config.num_crtc; i++) {
                u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800));
                u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800));
                u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800));
@@ -1285,36 +1708,24 @@ nvd0_display_init(struct drm_device *dev)
        nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
 
        /* init master */
-       nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
-       nv_wr32(dev, 0x610498, 0x00010000);
-       nv_wr32(dev, 0x61049c, 0x00000001);
-       nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
-       nv_wr32(dev, 0x640000, 0x00000000);
-       nv_wr32(dev, 0x610490, 0x01000013);
-       if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
-               NV_ERROR(dev, "PDISP: master 0x%08x\n",
-                        nv_rd32(dev, 0x610490));
-               return -EBUSY;
+       ret = evo_init_dma(dev, EVO_MASTER);
+       if (ret)
+               goto error;
+
+       /* init flips + overlays + cursors */
+       for (i = 0; i < dev->mode_config.num_crtc; i++) {
+               if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
+                   (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
+                   (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
+                   (ret = evo_init_pio(dev, EVO_CURS(i))))
+                       goto error;
        }
-       nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
-       nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);
 
-       /* init cursors */
-       for (i = 13; i <= 14; i++) {
-               nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
-               if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
-                       NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
-                                nv_rd32(dev, 0x610490 + (i * 0x10)));
-                       return -EBUSY;
-               }
-
-               nv_mask(dev, 0x610090, 1 << i, 1 << i);
-               nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
+       push = evo_wait(dev, EVO_MASTER, 32);
+       if (!push) {
+               ret = -EBUSY;
+               goto error;
        }
-
-       push = evo_wait(dev, 0, 32);
-       if (!push)
-               return -EBUSY;
        evo_mthd(push, 0x0088, 1);
        evo_data(push, NvEvoSync);
        evo_mthd(push, 0x0084, 1);
@@ -1323,9 +1734,12 @@ nvd0_display_init(struct drm_device *dev)
        evo_data(push, 0x80000000);
        evo_mthd(push, 0x008c, 1);
        evo_data(push, 0x00000000);
-       evo_kick(push, dev, 0);
+       evo_kick(push, dev, EVO_MASTER);
 
-       return 0;
+error:
+       if (ret)
+               nvd0_display_fini(dev);
+       return ret;
 }
 
 void
@@ -1334,11 +1748,16 @@ nvd0_display_destroy(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nvd0_display *disp = nvd0_display(dev);
        struct pci_dev *pdev = dev->pdev;
+       int i;
 
-       nvd0_display_fini(dev);
+       for (i = 0; i < EVO_DMA_NR; i++) {
+               struct evo *evo = &disp->evo[i];
+               pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
+       }
 
-       pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
        nouveau_gpuobj_ref(NULL, &disp->mem);
+       nouveau_bo_unmap(disp->sync);
+       nouveau_bo_ref(NULL, &disp->sync);
        nouveau_irq_unregister(dev, 26);
 
        dev_priv->engine.display.priv = NULL;
@@ -1410,61 +1829,83 @@ nvd0_display_create(struct drm_device *dev)
        tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
        nouveau_irq_register(dev, 26, nvd0_display_intr);
 
+       /* small shared memory area we use for notifiers and semaphores */
+       ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+                            0, 0x0000, &disp->sync);
+       if (!ret) {
+               ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+               if (!ret)
+                       ret = nouveau_bo_map(disp->sync);
+               if (ret)
+                       nouveau_bo_ref(NULL, &disp->sync);
+       }
+
+       if (ret)
+               goto out;
+
        /* hash table and dma objects for the memory areas we care about */
        ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
        if (ret)
                goto out;
 
-       nv_wo32(disp->mem, 0x1000, 0x00000049);
-       nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8);
-       nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8);
-       nv_wo32(disp->mem, 0x100c, 0x00000000);
-       nv_wo32(disp->mem, 0x1010, 0x00000000);
-       nv_wo32(disp->mem, 0x1014, 0x00000000);
-       nv_wo32(disp->mem, 0x0000, NvEvoSync);
-       nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001);
-
-       nv_wo32(disp->mem, 0x1020, 0x00000049);
-       nv_wo32(disp->mem, 0x1024, 0x00000000);
-       nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8);
-       nv_wo32(disp->mem, 0x102c, 0x00000000);
-       nv_wo32(disp->mem, 0x1030, 0x00000000);
-       nv_wo32(disp->mem, 0x1034, 0x00000000);
-       nv_wo32(disp->mem, 0x0008, NvEvoVRAM);
-       nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001);
-
-       nv_wo32(disp->mem, 0x1040, 0x00000009);
-       nv_wo32(disp->mem, 0x1044, 0x00000000);
-       nv_wo32(disp->mem, 0x1048, (dev_priv->vram_size - 1) >> 8);
-       nv_wo32(disp->mem, 0x104c, 0x00000000);
-       nv_wo32(disp->mem, 0x1050, 0x00000000);
-       nv_wo32(disp->mem, 0x1054, 0x00000000);
-       nv_wo32(disp->mem, 0x0010, NvEvoVRAM_LP);
-       nv_wo32(disp->mem, 0x0014, (0x1040 << 9) | 0x00000001);
-
-       nv_wo32(disp->mem, 0x1060, 0x0fe00009);
-       nv_wo32(disp->mem, 0x1064, 0x00000000);
-       nv_wo32(disp->mem, 0x1068, (dev_priv->vram_size - 1) >> 8);
-       nv_wo32(disp->mem, 0x106c, 0x00000000);
-       nv_wo32(disp->mem, 0x1070, 0x00000000);
-       nv_wo32(disp->mem, 0x1074, 0x00000000);
-       nv_wo32(disp->mem, 0x0018, NvEvoFB32);
-       nv_wo32(disp->mem, 0x001c, (0x1060 << 9) | 0x00000001);
-
-       pinstmem->flush(dev);
+       /* create evo dma channels */
+       for (i = 0; i < EVO_DMA_NR; i++) {
+               struct evo *evo = &disp->evo[i];
+               u64 offset = disp->sync->bo.offset;
+               u32 dmao = 0x1000 + (i * 0x100);
+               u32 hash = 0x0000 + (i * 0x040);
+
+               evo->idx = i;
+               evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
+               evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
+               if (!evo->ptr) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
 
-       /* push buffers for evo channels */
-       disp->evo[0].ptr =
-               pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
-       if (!disp->evo[0].ptr) {
-               ret = -ENOMEM;
-               goto out;
+               nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
+               nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
+               nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
+               nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
+               nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
+               nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
+                                               ((dmao + 0x00) << 9));
+
+               nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
+               nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8);
+               nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
+               nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
+               nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
+                                               ((dmao + 0x20) << 9));
+
+               nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
+               nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8);
+               nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
+               nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
+               nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
+                                               ((dmao + 0x40) << 9));
+
+               nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
+               nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8);
+               nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
+               nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
+               nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
+               nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
+                                               ((dmao + 0x60) << 9));
        }
 
-       ret = nvd0_display_init(dev);
-       if (ret)
-               goto out;
+       pinstmem->flush(dev);
 
 out:
        if (ret)
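
To keep the channel bookkeeping above readable: each CRTC gets its own flip, overlay, immediate-overlay and cursor channel at a fixed offset from the master, and page flips are ordered through two alternating 16-byte semaphore slots in the shared sync buffer. A small standalone sketch of just that indexing and the offset/value ping-pong (queue_flip() and flip_sem are illustrative names, not driver structures):

#include <stdio.h>

/* Channel indices, as laid out by the EVO_* macros above. */
#define EVO_MASTER    0x00
#define EVO_FLIP(c)  (0x01 + (c))
#define EVO_OVLY(c)  (0x05 + (c))
#define EVO_OIMM(c)  (0x09 + (c))
#define EVO_CURS(c)  (0x0d + (c))

/* Per-CRTC slice of the shared sync buffer. */
#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))

struct flip_sem {
        unsigned int offset;    /* byte offset into the sync buffer */
        unsigned short value;   /* next semaphore value to release */
};

/* Queue one flip: release the current slot, then switch to the other slot,
 * mirroring the "evo->sem.offset ^= 0x10; evo->sem.value++" step above. */
static void queue_flip(struct flip_sem *sem)
{
        printf("wait on 0x%04x for 0xf00d%04x\n",
               sem->offset, (unsigned int)sem->value);
        sem->offset ^= 0x10;
        sem->value++;
}

int main(void)
{
        struct flip_sem sem = { .offset = EVO_SYNC(0, 0x00), .value = 0 };
        int i;

        printf("crtc0: flip ch %d, cursor ch %d\n", EVO_FLIP(0), EVO_CURS(0));
        for (i = 0; i < 3; i++)
                queue_flip(&sem);
        return 0;
}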
index 4c8796b..6a5f439 100644 (file)
@@ -42,6 +42,20 @@ static struct pci_device_id pciidlist[] = {
        r128_PCI_IDS
 };
 
+static const struct file_operations r128_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = r128_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -60,21 +74,7 @@ static struct drm_driver driver = {
        .reclaim_buffers = drm_core_reclaim_buffers,
        .ioctls = r128_ioctls,
        .dma_ioctl = r128_cce_buffers,
-       .fops = {
-               .owner = THIS_MODULE,
-               .open = drm_open,
-               .release = drm_release,
-               .unlocked_ioctl = drm_ioctl,
-               .mmap = drm_mmap,
-               .poll = drm_poll,
-               .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
-               .compat_ioctl = r128_compat_ioctl,
-#endif
-               .llseek = noop_llseek,
-       },
-
-
+       .fops = &r128_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index cf8b4bc..2139fe8 100644 (file)
@@ -70,7 +70,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
        r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
        r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
        evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
-       radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o
+       radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
+       radeon_semaphore.o radeon_sa.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
@@ -78,4 +79,4 @@ radeon-$(CONFIG_ACPI) += radeon_acpi.o
 
 obj-$(CONFIG_DRM_RADEON)+= radeon.o
 
-CFLAGS_radeon_trace_points.o := -I$(src)
\ No newline at end of file
+CFLAGS_radeon_trace_points.o := -I$(src)
index 14cc88a..d1bd239 100644 (file)
@@ -665,6 +665,8 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
        SDEBUG("   count: %d\n", count);
        if (arg == ATOM_UNIT_MICROSEC)
                udelay(count);
+       else if (!drm_can_sleep())
+               mdelay(count);
        else
                msleep(count);
 }
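
The atom delay fix above avoids msleep() in contexts where sleeping is illegal. As a rough sketch of the idea only (the actual drm_can_sleep() definition in this series may include additional checks, e.g. for the kernel debugger), the decision boils down to something like:

#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>

/* Assumption: sleeping is only safe outside atomic context and with
 * interrupts enabled; otherwise fall back to a busy-wait. */
#define can_sleep_here() (!in_atomic() && !irqs_disabled())

static void flexible_delay_ms(unsigned int ms)
{
        if (can_sleep_here())
                msleep(ms);     /* may schedule away */
        else
                mdelay(ms);     /* busy-wait, safe in atomic context */
}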
index 2b97262..0fda830 100644 (file)
@@ -554,7 +554,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                if (encoder->crtc == crtc) {
                        radeon_encoder = to_radeon_encoder(encoder);
                        connector = radeon_get_connector_for_encoder(encoder);
-                       if (connector)
+                       if (connector && connector->display_info.bpc)
                                bpc = connector->display_info.bpc;
                        encoder_mode = atombios_get_encoder_mode(encoder);
                        if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
@@ -1184,7 +1184,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
        WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
 
-       fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+       fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
        WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
        WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
@@ -1353,7 +1353,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
        WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
 
-       fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+       fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
        WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
        WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
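
The two pitch hunks above read the byte stride from the new per-plane pitches[] array; the GRPH pitch register wants it in pixels, hence the divide by bytes per pixel. A trivial worked example with illustrative values:

#include <stdio.h>

int main(void)
{
        /* e.g. a 1920x1080 XRGB8888 scanout: 32 bpp -> 4-byte pixels,
         * byte pitch 1920 * 4 = 7680. */
        unsigned int pitches0 = 7680;
        unsigned int bits_per_pixel = 32;
        unsigned int fb_pitch_pixels = pitches0 / (bits_per_pixel / 8);

        printf("fb_pitch_pixels = %u\n", fb_pitch_pixels); /* 1920 */
        return 0;
}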
index 39c04c1..f1f06ca 100644 (file)
@@ -409,8 +409,6 @@ int
 atombios_get_encoder_mode(struct drm_encoder *encoder)
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct radeon_device *rdev = dev->dev_private;
        struct drm_connector *connector;
        struct radeon_connector *radeon_connector;
        struct radeon_connector_atom_dig *dig_connector;
@@ -434,13 +432,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        switch (connector->connector_type) {
        case DRM_MODE_CONNECTOR_DVII:
        case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-               if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
-                       /* fix me */
-                       if (ASIC_IS_DCE4(rdev))
-                               return ATOM_ENCODER_MODE_DVI;
-                       else
-                               return ATOM_ENCODER_MODE_HDMI;
-               } else if (radeon_connector->use_digital)
+               if (drm_detect_monitor_audio(radeon_connector->edid) &&
+                   radeon_audio)
+                       return ATOM_ENCODER_MODE_HDMI;
+               else if (radeon_connector->use_digital)
                        return ATOM_ENCODER_MODE_DVI;
                else
                        return ATOM_ENCODER_MODE_CRT;
@@ -448,13 +443,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        case DRM_MODE_CONNECTOR_DVID:
        case DRM_MODE_CONNECTOR_HDMIA:
        default:
-               if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
-                       /* fix me */
-                       if (ASIC_IS_DCE4(rdev))
-                               return ATOM_ENCODER_MODE_DVI;
-                       else
-                               return ATOM_ENCODER_MODE_HDMI;
-               } else
+               if (drm_detect_monitor_audio(radeon_connector->edid) &&
+                   radeon_audio)
+                       return ATOM_ENCODER_MODE_HDMI;
+               else
                        return ATOM_ENCODER_MODE_DVI;
                break;
        case DRM_MODE_CONNECTOR_LVDS:
@@ -465,13 +457,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
                if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
                    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
                        return ATOM_ENCODER_MODE_DP;
-               else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
-                       /* fix me */
-                       if (ASIC_IS_DCE4(rdev))
-                               return ATOM_ENCODER_MODE_DVI;
-                       else
-                               return ATOM_ENCODER_MODE_HDMI;
-               } else
+               else if (drm_detect_monitor_audio(radeon_connector->edid) &&
+                        radeon_audio)
+                       return ATOM_ENCODER_MODE_HDMI;
+               else
                        return ATOM_ENCODER_MODE_DVI;
                break;
        case DRM_MODE_CONNECTOR_eDP:
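With the DCE4 special case removed, atombios_get_encoder_mode() reduces to the same decision for every digital connector: an audio-capable monitor with radeon_audio enabled gets HDMI, otherwise a digital link gets DVI and analog falls back to CRT. A compact stand-alone sketch of that decision; the enum values and boolean flags are illustrative, not the ATOM_ENCODER_MODE_* constants or the connector fields.

#include <stdbool.h>
#include <stdio.h>

enum encoder_mode { MODE_CRT, MODE_DVI, MODE_HDMI };   /* illustrative names */

static enum encoder_mode pick_mode(bool monitor_has_audio, bool audio_enabled,
                                   bool use_digital)
{
        if (monitor_has_audio && audio_enabled)
                return MODE_HDMI;       /* audio path wins when both sides allow it */
        if (use_digital)
                return MODE_DVI;        /* digital link, no audio */
        return MODE_CRT;                /* analog fallback (DVI-I style connectors) */
}

int main(void)
{
        printf("%d\n", pick_mode(true,  true,  true));   /* MODE_HDMI */
        printf("%d\n", pick_mode(true,  false, true));   /* MODE_DVI  */
        printf("%d\n", pick_mode(false, false, false));  /* MODE_CRT  */
        return 0;
}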
index 92c9628..636660f 100644
@@ -40,6 +40,8 @@
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+                                    int ring, u32 cp_int_cntl);
 
 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 {
@@ -1311,18 +1313,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
  */
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
+       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
        /* set to DX10/11 mode */
-       radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
-       radeon_ring_write(rdev, 1);
+       radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+       radeon_ring_write(ring, 1);
        /* FIXME: implement */
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
                          (2 << 0) |
 #endif
                          (ib->gpu_addr & 0xFFFFFFFC));
-       radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
-       radeon_ring_write(rdev, ib->length_dw);
+       radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+       radeon_ring_write(ring, ib->length_dw);
 }
 
 
@@ -1360,71 +1364,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 
 static int evergreen_cp_start(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r, i;
        uint32_t cp_me;
 
-       r = radeon_ring_lock(rdev, 7);
+       r = radeon_ring_lock(rdev, ring, 7);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
-       radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
-       radeon_ring_write(rdev, 0x1);
-       radeon_ring_write(rdev, 0x0);
-       radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
-       radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+       radeon_ring_write(ring, 0x1);
+       radeon_ring_write(ring, 0x0);
+       radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+       radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_unlock_commit(rdev, ring);
 
        cp_me = 0xff;
        WREG32(CP_ME_CNTL, cp_me);
 
-       r = radeon_ring_lock(rdev, evergreen_default_size + 19);
+       r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
 
        /* setup clear context state */
-       radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-       radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+       radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
        for (i = 0; i < evergreen_default_size; i++)
-               radeon_ring_write(rdev, evergreen_default_state[i]);
+               radeon_ring_write(ring, evergreen_default_state[i]);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-       radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+       radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
        /* set clear context state */
-       radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+       radeon_ring_write(ring, 0);
 
        /* SQ_VTX_BASE_VTX_LOC */
-       radeon_ring_write(rdev, 0xc0026f00);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(ring, 0xc0026f00);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
 
        /* Clear consts */
-       radeon_ring_write(rdev, 0xc0036f00);
-       radeon_ring_write(rdev, 0x00000bc4);
-       radeon_ring_write(rdev, 0xffffffff);
-       radeon_ring_write(rdev, 0xffffffff);
-       radeon_ring_write(rdev, 0xffffffff);
+       radeon_ring_write(ring, 0xc0036f00);
+       radeon_ring_write(ring, 0x00000bc4);
+       radeon_ring_write(ring, 0xffffffff);
+       radeon_ring_write(ring, 0xffffffff);
+       radeon_ring_write(ring, 0xffffffff);
 
-       radeon_ring_write(rdev, 0xc0026900);
-       radeon_ring_write(rdev, 0x00000316);
-       radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
-       radeon_ring_write(rdev, 0x00000010); /*  */
+       radeon_ring_write(ring, 0xc0026900);
+       radeon_ring_write(ring, 0x00000316);
+       radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+       radeon_ring_write(ring, 0x00000010); /*  */
 
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
 
        return 0;
 }
 
 int evergreen_cp_resume(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 tmp;
        u32 rb_bufsz;
        int r;
@@ -1442,13 +1448,13 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        RREG32(GRBM_SOFT_RESET);
 
        /* Set ring buffer size */
-       rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+       rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
        WREG32(CP_RB_CNTL, tmp);
-       WREG32(CP_SEM_WAIT_TIMER, 0x4);
+       WREG32(CP_SEM_WAIT_TIMER, 0x0);
 
        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1456,8 +1462,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       rdev->cp.wptr = 0;
-       WREG32(CP_RB_WPTR, rdev->cp.wptr);
+       ring->wptr = 0;
+       WREG32(CP_RB_WPTR, ring->wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
@@ -1475,16 +1481,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        mdelay(1);
        WREG32(CP_RB_CNTL, tmp);
 
-       WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+       WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
-       rdev->cp.rptr = RREG32(CP_RB_RPTR);
+       ring->rptr = RREG32(CP_RB_RPTR);
 
        evergreen_cp_start(rdev);
-       rdev->cp.ready = true;
-       r = radeon_ring_test(rdev);
+       ring->ready = true;
+       r = radeon_ring_test(rdev, ring);
        if (r) {
-               rdev->cp.ready = false;
+               ring->ready = false;
                return r;
        }
        return 0;
@@ -2353,7 +2359,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
        return 0;
 }
 
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        u32 srbm_status;
        u32 grbm_status;
@@ -2366,19 +2372,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
        grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
        grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
        if (!(grbm_status & GUI_ACTIVE)) {
-               r100_gpu_lockup_update(lockup, &rdev->cp);
+               r100_gpu_lockup_update(lockup, ring);
                return false;
        }
        /* force CP activities */
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (!r) {
                /* PACKET2 NOP */
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_unlock_commit(rdev);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_unlock_commit(rdev, ring);
        }
-       rdev->cp.rptr = RREG32(CP_RB_RPTR);
-       return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+       ring->rptr = RREG32(CP_RB_RPTR);
+       return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }
 
 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2470,7 +2476,13 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 {
        u32 tmp;
 
-       WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+       if (rdev->family >= CHIP_CAYMAN) {
+               cayman_cp_int_cntl_setup(rdev, 0,
+                                        CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+               cayman_cp_int_cntl_setup(rdev, 1, 0);
+               cayman_cp_int_cntl_setup(rdev, 2, 0);
+       } else
+               WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2515,6 +2527,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 int evergreen_irq_set(struct radeon_device *rdev)
 {
        u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+       u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
        u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
        u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
        u32 grbm_int_cntl = 0;
@@ -2539,11 +2552,28 @@ int evergreen_irq_set(struct radeon_device *rdev)
        hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
        hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
-       if (rdev->irq.sw_int) {
-               DRM_DEBUG("evergreen_irq_set: sw int\n");
-               cp_int_cntl |= RB_INT_ENABLE;
-               cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+       if (rdev->family >= CHIP_CAYMAN) {
+               /* enable CP interrupts on all rings */
+               if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+                       DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+                       cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+               }
+               if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+                       DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+                       cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+               }
+               if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+                       DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+                       cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+               }
+       } else {
+               if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+                       DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+                       cp_int_cntl |= RB_INT_ENABLE;
+                       cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+               }
        }
+
        if (rdev->irq.crtc_vblank_int[0] ||
            rdev->irq.pflip[0]) {
                DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2603,7 +2633,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
                grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
        }
 
-       WREG32(CP_INT_CNTL, cp_int_cntl);
+       if (rdev->family >= CHIP_CAYMAN) {
+               cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
+               cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
+               cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
+       } else
+               WREG32(CP_INT_CNTL, cp_int_cntl);
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3018,11 +3053,24 @@ restart_ih:
                case 177: /* CP_INT in IB1 */
                case 178: /* CP_INT in IB2 */
                        DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
-                       radeon_fence_process(rdev);
+                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
                case 181: /* CP EOP event */
                        DRM_DEBUG("IH: CP EOP\n");
-                       radeon_fence_process(rdev);
+                       if (rdev->family >= CHIP_CAYMAN) {
+                               switch (src_data) {
+                               case 0:
+                                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+                                       break;
+                               case 1:
+                                       radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+                                       break;
+                               case 2:
+                                       radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+                                       break;
+                               }
+                       } else
+                               radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: GUI idle\n");
@@ -3052,6 +3100,7 @@ restart_ih:
 
 static int evergreen_startup(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
        /* enable pcie gen2 link */
@@ -3106,6 +3155,12 @@ static int evergreen_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -3115,7 +3170,9 @@ static int evergreen_startup(struct radeon_device *rdev)
        }
        evergreen_irq_set(rdev);
 
-       r = radeon_ring_init(rdev, rdev->cp.ring_size);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+                            R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+                            0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
        r = evergreen_cp_load_microcode(rdev);
@@ -3125,6 +3182,22 @@ static int evergreen_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
+       r = r600_audio_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: audio init failed\n");
+               return r;
+       }
+
        return 0;
 }
 
@@ -3144,31 +3217,30 @@ int evergreen_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       rdev->accel_working = true;
        r = evergreen_startup(rdev);
        if (r) {
                DRM_ERROR("evergreen startup failed on resume\n");
                return r;
        }
 
-       r = r600_ib_test(rdev);
-       if (r) {
-               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-               return r;
-       }
-
        return r;
 
 }
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+       r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
+       radeon_ib_pool_suspend(rdev);
+       r600_blit_suspend(rdev);
        r700_cp_stop(rdev);
-       rdev->cp.ready = false;
+       ring->ready = false;
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        evergreen_pcie_gart_disable(rdev);
-       r600_blit_suspend(rdev);
 
        return 0;
 }
@@ -3243,8 +3315,8 @@ int evergreen_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       rdev->cp.ring_obj = NULL;
-       r600_ring_init(rdev, 1024 * 1024);
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
@@ -3253,29 +3325,24 @@ int evergreen_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = evergreen_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
+               r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                evergreen_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
-       if (rdev->accel_working) {
-               r = radeon_ib_pool_init(rdev);
-               if (r) {
-                       DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-                       rdev->accel_working = false;
-               }
-               r = r600_ib_test(rdev);
-               if (r) {
-                       DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-                       rdev->accel_working = false;
-               }
-       }
 
        /* Don't start up if the MC ucode is missing on BTC parts.
         * The default clocks and voltages before the MC ucode
@@ -3293,15 +3360,17 @@ int evergreen_init(struct radeon_device *rdev)
 
 void evergreen_fini(struct radeon_device *rdev)
 {
+       r600_audio_fini(rdev);
        r600_blit_fini(rdev);
        r700_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
-       radeon_ib_pool_fini(rdev);
+       r100_ib_fini(rdev);
        radeon_irq_kms_fini(rdev);
        evergreen_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
+       radeon_semaphore_driver_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
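The evergreen hunks above all follow the same refactor: the single rdev->cp is replaced by an indexed rdev->ring[] array, and the ring, fence and lockup helpers take the ring (or its index) as an explicit argument so additional CP rings can be driven by the same code. A stand-alone sketch of that shape; the struct names and ring indices below are illustrative stubs, not the radeon definitions.

#include <stdio.h>

#define RING_GFX 0      /* illustrative indices, mirroring gfx/cp1/cp2 */
#define RING_CP1 1
#define RING_CP2 2
#define NUM_RINGS 3

struct ring {                           /* stand-in, not struct radeon_ring */
        unsigned int wptr;
        int ready;
};

struct device_ctx {                     /* stand-in, not struct radeon_device */
        struct ring ring[NUM_RINGS];    /* was: a single "cp" member */
};

static void ring_write(struct ring *ring, unsigned int dw)
{
        /* the driver appends the dword to the ring buffer; here we only count */
        ring->wptr++;
        (void)dw;
}

static void fence_process(struct device_ctx *dev, int ring_index)
{
        printf("processing fences on ring %d (wptr=%u)\n",
               ring_index, dev->ring[ring_index].wptr);
}

int main(void)
{
        static struct device_ctx dev;   /* zero-initialized */
        struct ring *gfx = &dev.ring[RING_GFX];

        ring_write(gfx, 0x80000000);    /* callers now pass the ring, not the device */
        ring_write(gfx, 0x80000000);
        fence_process(&dev, RING_GFX);  /* fence handling is per ring index */
        return 0;
}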
index 914e5af..2379849 100644
@@ -49,6 +49,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
                  int w, int h, u64 gpu_addr)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 cb_color_info;
        int pitch, slice;
 
@@ -62,23 +63,23 @@ set_render_target(struct radeon_device *rdev, int format,
        pitch = (w / 8) - 1;
        slice = ((w * h) / 64) - 1;
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
-       radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, pitch);
-       radeon_ring_write(rdev, slice);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, cb_color_info);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+       radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, pitch);
+       radeon_ring_write(ring, slice);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, cb_color_info);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
 }
 
 /* emits 5dw */
@@ -87,6 +88,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
                    u32 sync_type, u32 size,
                    u64 mc_addr)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 cp_coher_size;
 
        if (size == 0xffffffff)
@@ -99,39 +101,40 @@ cp_set_surface_sync(struct radeon_device *rdev,
                 * to the RB directly. For IBs, the CP programs this as part of the
                 * surface_sync packet.
                 */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-               radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
-               radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */
+               radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
        }
-       radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-       radeon_ring_write(rdev, sync_type);
-       radeon_ring_write(rdev, cp_coher_size);
-       radeon_ring_write(rdev, mc_addr >> 8);
-       radeon_ring_write(rdev, 10); /* poll interval */
+       radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       radeon_ring_write(ring, sync_type);
+       radeon_ring_write(ring, cp_coher_size);
+       radeon_ring_write(ring, mc_addr >> 8);
+       radeon_ring_write(ring, 10); /* poll interval */
 }
 
 /* emits 11dw + 1 surface sync = 16dw */
 static void
 set_shaders(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u64 gpu_addr;
 
        /* VS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
-       radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+       radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, 2);
+       radeon_ring_write(ring, 0);
 
        /* PS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
-       radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, 1);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 2);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+       radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, 1);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 2);
 
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
        cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -141,6 +144,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
 
        /* high addr, stride */
@@ -155,16 +159,16 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
                SQ_VTCX_SEL_Z(SQ_SEL_Z) |
                SQ_VTCX_SEL_W(SQ_SEL_W);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
-       radeon_ring_write(rdev, 0x580);
-       radeon_ring_write(rdev, gpu_addr & 0xffffffff);
-       radeon_ring_write(rdev, 48 - 1); /* size */
-       radeon_ring_write(rdev, sq_vtx_constant_word2);
-       radeon_ring_write(rdev, sq_vtx_constant_word3);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+       radeon_ring_write(ring, 0x580);
+       radeon_ring_write(ring, gpu_addr & 0xffffffff);
+       radeon_ring_write(ring, 48 - 1); /* size */
+       radeon_ring_write(ring, sq_vtx_constant_word2);
+       radeon_ring_write(ring, sq_vtx_constant_word3);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
 
        if ((rdev->family == CHIP_CEDAR) ||
            (rdev->family == CHIP_PALM) ||
@@ -185,6 +189,7 @@ set_tex_resource(struct radeon_device *rdev,
                 int format, int w, int h, int pitch,
                 u64 gpu_addr, u32 size)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 sq_tex_resource_word0, sq_tex_resource_word1;
        u32 sq_tex_resource_word4, sq_tex_resource_word7;
 
@@ -208,16 +213,16 @@ set_tex_resource(struct radeon_device *rdev,
        cp_set_surface_sync(rdev,
                            PACKET3_TC_ACTION_ENA, size, gpu_addr);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, sq_tex_resource_word0);
-       radeon_ring_write(rdev, sq_tex_resource_word1);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, sq_tex_resource_word4);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, sq_tex_resource_word7);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, sq_tex_resource_word0);
+       radeon_ring_write(ring, sq_tex_resource_word1);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, sq_tex_resource_word4);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, sq_tex_resource_word7);
 }
 
 /* emits 12 */
@@ -225,6 +230,7 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
             int x2, int y2)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        /* workaround some hw bugs */
        if (x2 == 0)
                x1 = 1;
@@ -235,43 +241,44 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
                        x2 = 2;
        }
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
-       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+       radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+       radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+       radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 }
 
 /* emits 10 */
 static void
 draw_auto(struct radeon_device *rdev)
 {
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-       radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
-       radeon_ring_write(rdev, DI_PT_RECTLIST);
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+       radeon_ring_write(ring, DI_PT_RECTLIST);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+       radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
                          (2 << 2) |
 #endif
                          DI_INDEX_SIZE_16_BIT);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
-       radeon_ring_write(rdev, 1);
+       radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+       radeon_ring_write(ring, 1);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
-       radeon_ring_write(rdev, 3);
-       radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+       radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+       radeon_ring_write(ring, 3);
+       radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
 
 }
 
@@ -279,6 +286,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
        u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
        u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
@@ -292,8 +300,8 @@ set_default_state(struct radeon_device *rdev)
        int dwords;
 
        /* set clear context state */
-       radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+       radeon_ring_write(ring, 0);
 
        if (rdev->family < CHIP_CAYMAN) {
                switch (rdev->family) {
@@ -550,60 +558,60 @@ set_default_state(struct radeon_device *rdev)
                                            NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
 
                /* disable dyn gprs */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-               radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
-               radeon_ring_write(rdev, 0);
+               radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(ring, 0);
 
                /* setup LDS */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-               radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
-               radeon_ring_write(rdev, 0x10001000);
+               radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(ring, 0x10001000);
 
                /* SQ config */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
-               radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
-               radeon_ring_write(rdev, sq_config);
-               radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
-               radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
-               radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
-               radeon_ring_write(rdev, 0);
-               radeon_ring_write(rdev, 0);
-               radeon_ring_write(rdev, sq_thread_resource_mgmt);
-               radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
-               radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
-               radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
-               radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+               radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+               radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(ring, sq_config);
+               radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+               radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+               radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
+               radeon_ring_write(ring, 0);
+               radeon_ring_write(ring, 0);
+               radeon_ring_write(ring, sq_thread_resource_mgmt);
+               radeon_ring_write(ring, sq_thread_resource_mgmt_2);
+               radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+               radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+               radeon_ring_write(ring, sq_stack_resource_mgmt_3);
        }
 
        /* CONTEXT_CONTROL */
-       radeon_ring_write(rdev, 0xc0012800);
-       radeon_ring_write(rdev, 0x80000000);
-       radeon_ring_write(rdev, 0x80000000);
+       radeon_ring_write(ring, 0xc0012800);
+       radeon_ring_write(ring, 0x80000000);
+       radeon_ring_write(ring, 0x80000000);
 
        /* SQ_VTX_BASE_VTX_LOC */
-       radeon_ring_write(rdev, 0xc0026f00);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(ring, 0xc0026f00);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
 
        /* SET_SAMPLER */
-       radeon_ring_write(rdev, 0xc0036e00);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000012);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(ring, 0xc0036e00);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000012);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
 
        /* set to DX10/11 mode */
-       radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
-       radeon_ring_write(rdev, 1);
+       radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+       radeon_ring_write(ring, 1);
 
        /* emit an IB pointing at default state */
        dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-       radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
-       radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
-       radeon_ring_write(rdev, dwords);
+       radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
+       radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+       radeon_ring_write(ring, dwords);
 
 }
 
index cd4590a..f7442e6 100644
@@ -520,7 +520,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                break;
        case DB_Z_INFO:
                track->db_z_info = radeon_get_ib_value(p, idx);
-               if (!p->keep_tiling_flags) {
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                        r = evergreen_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -649,7 +649,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR7_INFO:
                tmp = (reg - CB_COLOR0_INFO) / 0x3c;
                track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
-               if (!p->keep_tiling_flags) {
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                        r = evergreen_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -666,7 +666,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR11_INFO:
                tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
                track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
-               if (!p->keep_tiling_flags) {
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                        r = evergreen_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -1355,7 +1355,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                        return -EINVAL;
                                }
                                ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-                               if (!p->keep_tiling_flags) {
+                               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                                        ib[idx+1+(i*8)+1] |=
                                                TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                                        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
@@ -1572,3 +1572,241 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
        return 0;
 }
 
+/* vm parser */
+static bool evergreen_vm_reg_valid(u32 reg)
+{
+       /* context regs are fine */
+       if (reg >= 0x28000)
+               return true;
+
+       /* check config regs */
+       switch (reg) {
+       case GRBM_GFX_INDEX:
+       case VGT_VTX_VECT_EJECT_REG:
+       case VGT_CACHE_INVALIDATION:
+       case VGT_GS_VERTEX_REUSE:
+       case VGT_PRIMITIVE_TYPE:
+       case VGT_INDEX_TYPE:
+       case VGT_NUM_INDICES:
+       case VGT_NUM_INSTANCES:
+       case VGT_COMPUTE_DIM_X:
+       case VGT_COMPUTE_DIM_Y:
+       case VGT_COMPUTE_DIM_Z:
+       case VGT_COMPUTE_START_X:
+       case VGT_COMPUTE_START_Y:
+       case VGT_COMPUTE_START_Z:
+       case VGT_COMPUTE_INDEX:
+       case VGT_COMPUTE_THREAD_GROUP_SIZE:
+       case VGT_HS_OFFCHIP_PARAM:
+       case PA_CL_ENHANCE:
+       case PA_SU_LINE_STIPPLE_VALUE:
+       case PA_SC_LINE_STIPPLE_STATE:
+       case PA_SC_ENHANCE:
+       case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
+       case SQ_DYN_GPR_SIMD_LOCK_EN:
+       case SQ_CONFIG:
+       case SQ_GPR_RESOURCE_MGMT_1:
+       case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
+       case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
+       case SQ_CONST_MEM_BASE:
+       case SQ_STATIC_THREAD_MGMT_1:
+       case SQ_STATIC_THREAD_MGMT_2:
+       case SQ_STATIC_THREAD_MGMT_3:
+       case SPI_CONFIG_CNTL:
+       case SPI_CONFIG_CNTL_1:
+       case TA_CNTL_AUX:
+       case DB_DEBUG:
+       case DB_DEBUG2:
+       case DB_DEBUG3:
+       case DB_DEBUG4:
+       case DB_WATERMARKS:
+       case TD_PS_BORDER_COLOR_INDEX:
+       case TD_PS_BORDER_COLOR_RED:
+       case TD_PS_BORDER_COLOR_GREEN:
+       case TD_PS_BORDER_COLOR_BLUE:
+       case TD_PS_BORDER_COLOR_ALPHA:
+       case TD_VS_BORDER_COLOR_INDEX:
+       case TD_VS_BORDER_COLOR_RED:
+       case TD_VS_BORDER_COLOR_GREEN:
+       case TD_VS_BORDER_COLOR_BLUE:
+       case TD_VS_BORDER_COLOR_ALPHA:
+       case TD_GS_BORDER_COLOR_INDEX:
+       case TD_GS_BORDER_COLOR_RED:
+       case TD_GS_BORDER_COLOR_GREEN:
+       case TD_GS_BORDER_COLOR_BLUE:
+       case TD_GS_BORDER_COLOR_ALPHA:
+       case TD_HS_BORDER_COLOR_INDEX:
+       case TD_HS_BORDER_COLOR_RED:
+       case TD_HS_BORDER_COLOR_GREEN:
+       case TD_HS_BORDER_COLOR_BLUE:
+       case TD_HS_BORDER_COLOR_ALPHA:
+       case TD_LS_BORDER_COLOR_INDEX:
+       case TD_LS_BORDER_COLOR_RED:
+       case TD_LS_BORDER_COLOR_GREEN:
+       case TD_LS_BORDER_COLOR_BLUE:
+       case TD_LS_BORDER_COLOR_ALPHA:
+       case TD_CS_BORDER_COLOR_INDEX:
+       case TD_CS_BORDER_COLOR_RED:
+       case TD_CS_BORDER_COLOR_GREEN:
+       case TD_CS_BORDER_COLOR_BLUE:
+       case TD_CS_BORDER_COLOR_ALPHA:
+       case SQ_ESGS_RING_SIZE:
+       case SQ_GSVS_RING_SIZE:
+       case SQ_ESTMP_RING_SIZE:
+       case SQ_GSTMP_RING_SIZE:
+       case SQ_HSTMP_RING_SIZE:
+       case SQ_LSTMP_RING_SIZE:
+       case SQ_PSTMP_RING_SIZE:
+       case SQ_VSTMP_RING_SIZE:
+       case SQ_ESGS_RING_ITEMSIZE:
+       case SQ_ESTMP_RING_ITEMSIZE:
+       case SQ_GSTMP_RING_ITEMSIZE:
+       case SQ_GSVS_RING_ITEMSIZE:
+       case SQ_GS_VERT_ITEMSIZE:
+       case SQ_GS_VERT_ITEMSIZE_1:
+       case SQ_GS_VERT_ITEMSIZE_2:
+       case SQ_GS_VERT_ITEMSIZE_3:
+       case SQ_GSVS_RING_OFFSET_1:
+       case SQ_GSVS_RING_OFFSET_2:
+       case SQ_GSVS_RING_OFFSET_3:
+       case SQ_HSTMP_RING_ITEMSIZE:
+       case SQ_LSTMP_RING_ITEMSIZE:
+       case SQ_PSTMP_RING_ITEMSIZE:
+       case SQ_VSTMP_RING_ITEMSIZE:
+       case VGT_TF_RING_SIZE:
+       case SQ_ESGS_RING_BASE:
+       case SQ_GSVS_RING_BASE:
+       case SQ_ESTMP_RING_BASE:
+       case SQ_GSTMP_RING_BASE:
+       case SQ_HSTMP_RING_BASE:
+       case SQ_LSTMP_RING_BASE:
+       case SQ_PSTMP_RING_BASE:
+       case SQ_VSTMP_RING_BASE:
+       case CAYMAN_VGT_OFFCHIP_LDS_BASE:
+       case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static int evergreen_vm_packet3_check(struct radeon_device *rdev,
+                                     u32 *ib, struct radeon_cs_packet *pkt)
+{
+       u32 idx = pkt->idx + 1;
+       u32 idx_value = ib[idx];
+       u32 start_reg, end_reg, reg, i;
+
+       switch (pkt->opcode) {
+       case PACKET3_NOP:
+       case PACKET3_SET_BASE:
+       case PACKET3_CLEAR_STATE:
+       case PACKET3_INDEX_BUFFER_SIZE:
+       case PACKET3_DISPATCH_DIRECT:
+       case PACKET3_DISPATCH_INDIRECT:
+       case PACKET3_MODE_CONTROL:
+       case PACKET3_SET_PREDICATION:
+       case PACKET3_COND_EXEC:
+       case PACKET3_PRED_EXEC:
+       case PACKET3_DRAW_INDIRECT:
+       case PACKET3_DRAW_INDEX_INDIRECT:
+       case PACKET3_INDEX_BASE:
+       case PACKET3_DRAW_INDEX_2:
+       case PACKET3_CONTEXT_CONTROL:
+       case PACKET3_DRAW_INDEX_OFFSET:
+       case PACKET3_INDEX_TYPE:
+       case PACKET3_DRAW_INDEX:
+       case PACKET3_DRAW_INDEX_AUTO:
+       case PACKET3_DRAW_INDEX_IMMD:
+       case PACKET3_NUM_INSTANCES:
+       case PACKET3_DRAW_INDEX_MULTI_AUTO:
+       case PACKET3_STRMOUT_BUFFER_UPDATE:
+       case PACKET3_DRAW_INDEX_OFFSET_2:
+       case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+       case PACKET3_MPEG_INDEX:
+       case PACKET3_WAIT_REG_MEM:
+       case PACKET3_MEM_WRITE:
+       case PACKET3_SURFACE_SYNC:
+       case PACKET3_EVENT_WRITE:
+       case PACKET3_EVENT_WRITE_EOP:
+       case PACKET3_EVENT_WRITE_EOS:
+       case PACKET3_SET_CONTEXT_REG:
+       case PACKET3_SET_BOOL_CONST:
+       case PACKET3_SET_LOOP_CONST:
+       case PACKET3_SET_RESOURCE:
+       case PACKET3_SET_SAMPLER:
+       case PACKET3_SET_CTL_CONST:
+       case PACKET3_SET_RESOURCE_OFFSET:
+       case PACKET3_SET_CONTEXT_REG_INDIRECT:
+       case PACKET3_SET_RESOURCE_INDIRECT:
+       case CAYMAN_PACKET3_DEALLOC_STATE:
+               break;
+       case PACKET3_COND_WRITE:
+               if (idx_value & 0x100) {
+                       reg = ib[idx + 5] * 4;
+                       if (!evergreen_vm_reg_valid(reg))
+                               return -EINVAL;
+               }
+               break;
+       case PACKET3_COPY_DW:
+               if (idx_value & 0x2) {
+                       reg = ib[idx + 3] * 4;
+                       if (!evergreen_vm_reg_valid(reg))
+                               return -EINVAL;
+               }
+               break;
+       case PACKET3_SET_CONFIG_REG:
+               start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+               end_reg = 4 * pkt->count + start_reg - 4;
+               if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+                   (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+                   (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+                       DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+                       return -EINVAL;
+               }
+               for (i = 0; i < pkt->count; i++) {
+                       reg = start_reg + (4 * i);
+                       if (!evergreen_vm_reg_valid(reg))
+                               return -EINVAL;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+       int ret = 0;
+       u32 idx = 0;
+       struct radeon_cs_packet pkt;
+
+       do {
+               pkt.idx = idx;
+               pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
+               pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+               pkt.one_reg_wr = 0;
+               switch (pkt.type) {
+               case PACKET_TYPE0:
+                       dev_err(rdev->dev, "Packet0 not allowed!\n");
+                       ret = -EINVAL;
+                       break;
+               case PACKET_TYPE2:
+                       break;
+               case PACKET_TYPE3:
+                       pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+                       ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
+                       break;
+               default:
+                       dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+                       ret = -EINVAL;
+                       break;
+               }
+               if (ret)
+                       break;
+               idx += pkt.count + 2;
+       } while (idx < ib->length_dw);
+
+       return ret;
+}
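The interesting branch of the new VM parser is PACKET3_SET_CONFIG_REG: evergreen_vm_packet3_check() bounds-checks the register window and then walks the packet, rejecting it as soon as any register fails the evergreen_vm_reg_valid() whitelist. A compact stand-alone sketch of that check; the window bounds and register numbers below are made up for illustration and are not the PACKET3_SET_CONFIG_REG_START/END values.

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_REG_START 0x8000         /* illustrative window bounds */
#define CONFIG_REG_END   0xac00

static bool reg_is_whitelisted(unsigned int reg)
{
        switch (reg) {
        case 0x8958:                    /* e.g. a primitive-type register */
        case 0x895c:                    /* e.g. an index-type register */
                return true;
        default:
                return reg >= 0x28000;  /* context registers are always fine */
        }
}

static int check_set_config(unsigned int start_reg, unsigned int count)
{
        unsigned int end_reg = start_reg + 4 * count - 4;
        unsigned int reg, i;

        if (start_reg < CONFIG_REG_START || start_reg >= CONFIG_REG_END ||
            end_reg >= CONFIG_REG_END)
                return -1;              /* range escapes the config window */

        for (i = 0; i < count; i++) {
                reg = start_reg + 4 * i;
                if (!reg_is_whitelisted(reg))
                        return -1;      /* one bad register rejects the packet */
        }
        return 0;
}

int main(void)
{
        printf("%d\n", check_set_config(0x8958, 2));   /* 0: both registers allowed */
        printf("%d\n", check_set_config(0x9000, 1));   /* -1: not whitelisted */
        return 0;
}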
index 7d7f215..4215de9 100644
 #define EVERGREEN_P1PLL_SS_CNTL                         0x414
 #define EVERGREEN_P2PLL_SS_CNTL                         0x454
 #       define EVERGREEN_PxPLL_SS_EN                    (1 << 12)
+
+#define EVERGREEN_AUDIO_PLL1_MUL                       0x5b0
+#define EVERGREEN_AUDIO_PLL1_DIV                       0x5b4
+#define EVERGREEN_AUDIO_PLL1_UNK                       0x5bc
+
+#define EVERGREEN_AUDIO_ENABLE                         0x5e78
+#define EVERGREEN_AUDIO_VENDOR_ID                      0x5ec0
+
 /* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
 #define EVERGREEN_GRPH_ENABLE                           0x6800
 #define EVERGREEN_GRPH_CONTROL                          0x6804
 #define EVERGREEN_DC_GPIO_HPD_EN                        0x64b8
 #define EVERGREEN_DC_GPIO_HPD_Y                         0x64bc
 
+/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+#define EVERGREEN_HDMI_BASE                            0x7030
+
+#define EVERGREEN_HDMI_CONFIG_OFFSET                   0xf0
+
 #endif
index e00039e..b502216 100644
 #define        PA_CL_ENHANCE                                   0x8A14
 #define                CLIP_VTX_REORDER_ENA                            (1 << 0)
 #define                NUM_CLIP_SEQ(x)                                 ((x) << 1)
+#define        PA_SC_ENHANCE                                   0x8BF0
 #define PA_SC_AA_CONFIG                                        0x28C04
 #define         MSAA_NUM_SAMPLES_SHIFT                  0
 #define         MSAA_NUM_SAMPLES_MASK                   0x3
 #define        SQ_GPR_RESOURCE_MGMT_3                          0x8C0C
 #define                NUM_HS_GPRS(x)                                  ((x) << 0)
 #define                NUM_LS_GPRS(x)                                  ((x) << 16)
+#define        SQ_GLOBAL_GPR_RESOURCE_MGMT_1                   0x8C10
+#define        SQ_GLOBAL_GPR_RESOURCE_MGMT_2                   0x8C14
 #define        SQ_THREAD_RESOURCE_MGMT                         0x8C18
 #define                NUM_PS_THREADS(x)                               ((x) << 0)
 #define                NUM_VS_THREADS(x)                               ((x) << 8)
 #define                NUM_HS_STACK_ENTRIES(x)                         ((x) << 0)
 #define                NUM_LS_STACK_ENTRIES(x)                         ((x) << 16)
 #define        SQ_DYN_GPR_CNTL_PS_FLUSH_REQ                    0x8D8C
+#define        SQ_DYN_GPR_SIMD_LOCK_EN                         0x8D94
+#define        SQ_STATIC_THREAD_MGMT_1                         0x8E20
+#define        SQ_STATIC_THREAD_MGMT_2                         0x8E24
+#define        SQ_STATIC_THREAD_MGMT_3                         0x8E28
 #define        SQ_LDS_RESOURCE_MGMT                            0x8E2C
 
 #define        SQ_MS_FIFO_SIZES                                0x8CF0
 #define        PACKET3_DRAW_INDEX_MULTI_ELEMENT                0x36
 #define        PACKET3_MEM_SEMAPHORE                           0x39
 #define        PACKET3_MPEG_INDEX                              0x3A
+#define        PACKET3_COPY_DW                                 0x3B
 #define        PACKET3_WAIT_REG_MEM                            0x3C
 #define        PACKET3_MEM_WRITE                               0x3D
 #define        PACKET3_INDIRECT_BUFFER                         0x32
 #define                        SQ_TEX_VTX_VALID_TEXTURE                        0x2
 #define                        SQ_TEX_VTX_VALID_BUFFER                         0x3
 
+#define VGT_VTX_VECT_EJECT_REG                         0x88b0
+
 #define SQ_CONST_MEM_BASE                              0x8df8
 
 #define SQ_ESGS_RING_BASE                              0x8c40
 #define PA_SC_SCREEN_SCISSOR_TL                         0x28030
 #define PA_SC_GENERIC_SCISSOR_TL                        0x28240
 #define PA_SC_WINDOW_SCISSOR_TL                         0x28204
-#define VGT_PRIMITIVE_TYPE                              0x8958
 
+#define VGT_PRIMITIVE_TYPE                              0x8958
+#define VGT_INDEX_TYPE                                  0x895C
+
+#define VGT_NUM_INDICES                                 0x8970
+
+#define VGT_COMPUTE_DIM_X                               0x8990
+#define VGT_COMPUTE_DIM_Y                               0x8994
+#define VGT_COMPUTE_DIM_Z                               0x8998
+#define VGT_COMPUTE_START_X                             0x899C
+#define VGT_COMPUTE_START_Y                             0x89A0
+#define VGT_COMPUTE_START_Z                             0x89A4
+#define VGT_COMPUTE_INDEX                               0x89A8
+#define VGT_COMPUTE_THREAD_GROUP_SIZE                   0x89AC
+#define VGT_HS_OFFCHIP_PARAM                            0x89B0
+
+#define DB_DEBUG                                       0x9830
+#define DB_DEBUG2                                      0x9834
+#define DB_DEBUG3                                      0x9838
+#define DB_DEBUG4                                      0x983C
+#define DB_WATERMARKS                                  0x9854
 #define DB_DEPTH_CONTROL                               0x28800
 #define DB_DEPTH_VIEW                                  0x28008
 #define DB_HTILE_DATA_BASE                             0x28014
 #define SQ_VTX_CONSTANT_WORD6_0                         0x30018
 #define SQ_VTX_CONSTANT_WORD7_0                         0x3001c
 
+#define TD_PS_BORDER_COLOR_INDEX                        0xA400
+#define TD_PS_BORDER_COLOR_RED                          0xA404
+#define TD_PS_BORDER_COLOR_GREEN                        0xA408
+#define TD_PS_BORDER_COLOR_BLUE                         0xA40C
+#define TD_PS_BORDER_COLOR_ALPHA                        0xA410
+#define TD_VS_BORDER_COLOR_INDEX                        0xA414
+#define TD_VS_BORDER_COLOR_RED                          0xA418
+#define TD_VS_BORDER_COLOR_GREEN                        0xA41C
+#define TD_VS_BORDER_COLOR_BLUE                         0xA420
+#define TD_VS_BORDER_COLOR_ALPHA                        0xA424
+#define TD_GS_BORDER_COLOR_INDEX                        0xA428
+#define TD_GS_BORDER_COLOR_RED                          0xA42C
+#define TD_GS_BORDER_COLOR_GREEN                        0xA430
+#define TD_GS_BORDER_COLOR_BLUE                         0xA434
+#define TD_GS_BORDER_COLOR_ALPHA                        0xA438
+#define TD_HS_BORDER_COLOR_INDEX                        0xA43C
+#define TD_HS_BORDER_COLOR_RED                          0xA440
+#define TD_HS_BORDER_COLOR_GREEN                        0xA444
+#define TD_HS_BORDER_COLOR_BLUE                         0xA448
+#define TD_HS_BORDER_COLOR_ALPHA                        0xA44C
+#define TD_LS_BORDER_COLOR_INDEX                        0xA450
+#define TD_LS_BORDER_COLOR_RED                          0xA454
+#define TD_LS_BORDER_COLOR_GREEN                        0xA458
+#define TD_LS_BORDER_COLOR_BLUE                         0xA45C
+#define TD_LS_BORDER_COLOR_ALPHA                        0xA460
+#define TD_CS_BORDER_COLOR_INDEX                        0xA464
+#define TD_CS_BORDER_COLOR_RED                          0xA468
+#define TD_CS_BORDER_COLOR_GREEN                        0xA46C
+#define TD_CS_BORDER_COLOR_BLUE                         0xA470
+#define TD_CS_BORDER_COLOR_ALPHA                        0xA474
+
 /* cayman 3D regs */
-#define CAYMAN_VGT_OFFCHIP_LDS_BASE                    0x89B0
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE                    0x89B4
+#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS                 0x8E48
 #define CAYMAN_DB_EQAA                                 0x28804
 #define CAYMAN_DB_DEPTH_INFO                           0x2803C
 #define CAYMAN_PA_SC_AA_CONFIG                         0x28BE0
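The TD_*_BORDER_COLOR blocks added above are laid out per shader stage (PS, VS, GS, HS, LS, CS), five dwords each (INDEX, RED, GREEN, BLUE, ALPHA), so consecutive stages sit 0x14 bytes apart. A minimal illustrative helper, not part of the patch and assuming only the defines above, could compute a stage's INDEX register:

static inline u32 td_border_color_index_reg(unsigned int stage)
{
	/* stage: 0 = PS, 1 = VS, 2 = GS, 3 = HS, 4 = LS, 5 = CS */
	return TD_PS_BORDER_COLOR_INDEX + stage * 0x14;
}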
index 0e57998..3211372 100644 (file)
@@ -934,7 +934,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
 
 int cayman_pcie_gart_enable(struct radeon_device *rdev)
 {
-       int r;
+       int i, r;
 
        if (rdev->gart.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -945,9 +945,12 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
                return r;
        radeon_gart_restore(rdev);
        /* Setup TLB control */
-       WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
+       WREG32(MC_VM_MX_L1_TLB_CNTL,
+              (0xA << 7) |
+              ENABLE_L1_TLB |
               ENABLE_L1_FRAGMENT_PROCESSING |
               SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+              ENABLE_ADVANCED_DRIVER_MODEL |
               SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
@@ -967,9 +970,26 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(VM_CONTEXT0_CNTL2, 0);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
-       /* disable context1-7 */
+
+       WREG32(0x15D4, 0);
+       WREG32(0x15D8, 0);
+       WREG32(0x15DC, 0);
+
+       /* empty context1-7 */
+       for (i = 1; i < 8; i++) {
+               WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
+               WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
+               WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+                       rdev->gart.table_addr >> 12);
+       }
+
+       /* enable context1-7 */
+       WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+              (u32)(rdev->dummy_page.addr >> 12));
        WREG32(VM_CONTEXT1_CNTL2, 0);
        WREG32(VM_CONTEXT1_CNTL, 0);
+       WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+                               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
        cayman_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -1006,9 +1026,69 @@ void cayman_pcie_gart_fini(struct radeon_device *rdev)
        radeon_gart_fini(rdev);
 }
 
+void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+                             int ring, u32 cp_int_cntl)
+{
+       u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+
+       WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+       WREG32(CP_INT_CNTL, cp_int_cntl);
+}
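/*
 * Usage sketch, illustrative only: with the ring-select helper above,
 * enabling e.g. the EOP timestamp interrupt on one of the CP rings becomes
 * a single call.  TIME_STAMP_INT_ENABLE is assumed from the CP_INT_CNTL
 * bits added to nid.h later in this patch.
 */
static void example_enable_cp_eop_irq(struct radeon_device *rdev, int ring)
{
	cayman_cp_int_cntl_setup(rdev, ring, TIME_STAMP_INT_ENABLE);
}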
+
 /*
  * CP.
  */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+                           struct radeon_fence *fence)
+{
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+       u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+       /* flush read cache over gart for this vmid */
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+       radeon_ring_write(ring, 0xFFFFFFFF);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 10); /* poll interval */
+       /* EVENT_WRITE_EOP - flush caches, send int */
+       radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+       radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+       radeon_ring_write(ring, addr & 0xffffffff);
+       radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+       radeon_ring_write(ring, fence->seq);
+       radeon_ring_write(ring, 0);
+}
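/*
 * Illustrative breakdown, not part of the patch: the address dwords of the
 * EVENT_WRITE_EOP emitted above carry the fence GPU address split low/high,
 * with the data and interrupt selects OR'ed into the high dword
 * (DATA_SEL/INT_SEL come from the nid.h additions below).
 */
static void example_eop_addr_words(u64 addr, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(addr);		/* ADDR[31:0] */
	*hi = (upper_32_bits(addr) & 0xff) |	/* ADDR[39:32] */
	      DATA_SEL(1) |			/* write the low 32-bit data (fence seq) */
	      INT_SEL(2);			/* interrupt once the write is confirmed */
}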
+
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
+       /* set to DX10/11 mode */
+       radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+       radeon_ring_write(ring, 1);
+       radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                         (2 << 0) |
+#endif
+                         (ib->gpu_addr & 0xFFFFFFFC));
+       radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+       radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
+
+       /* flush read cache over gart for this vmid */
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+       radeon_ring_write(ring, ib->vm_id);
+       radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+       radeon_ring_write(ring, 0xFFFFFFFF);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 10); /* poll interval */
+}
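/*
 * Sketch, illustrative only: the final INDIRECT_BUFFER dword written above
 * packs the IB length (in dwords) in the low bits and the VM id at bit 24.
 */
static u32 example_ib_control_word(const struct radeon_ib *ib)
{
	return ib->length_dw | (ib->vm_id << 24);
}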
+
 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
 {
        if (enable)
@@ -1049,63 +1129,64 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
 
 static int cayman_cp_start(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r, i;
 
-       r = radeon_ring_lock(rdev, 7);
+       r = radeon_ring_lock(rdev, ring, 7);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
-       radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
-       radeon_ring_write(rdev, 0x1);
-       radeon_ring_write(rdev, 0x0);
-       radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
-       radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+       radeon_ring_write(ring, 0x1);
+       radeon_ring_write(ring, 0x0);
+       radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
+       radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_unlock_commit(rdev, ring);
 
        cayman_cp_enable(rdev, true);
 
-       r = radeon_ring_lock(rdev, cayman_default_size + 19);
+       r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
 
        /* setup clear context state */
-       radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-       radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+       radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
        for (i = 0; i < cayman_default_size; i++)
-               radeon_ring_write(rdev, cayman_default_state[i]);
+               radeon_ring_write(ring, cayman_default_state[i]);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-       radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+       radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
        /* set clear context state */
-       radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+       radeon_ring_write(ring, 0);
 
        /* SQ_VTX_BASE_VTX_LOC */
-       radeon_ring_write(rdev, 0xc0026f00);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
-       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(ring, 0xc0026f00);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
+       radeon_ring_write(ring, 0x00000000);
 
        /* Clear consts */
-       radeon_ring_write(rdev, 0xc0036f00);
-       radeon_ring_write(rdev, 0x00000bc4);
-       radeon_ring_write(rdev, 0xffffffff);
-       radeon_ring_write(rdev, 0xffffffff);
-       radeon_ring_write(rdev, 0xffffffff);
+       radeon_ring_write(ring, 0xc0036f00);
+       radeon_ring_write(ring, 0x00000bc4);
+       radeon_ring_write(ring, 0xffffffff);
+       radeon_ring_write(ring, 0xffffffff);
+       radeon_ring_write(ring, 0xffffffff);
 
-       radeon_ring_write(rdev, 0xc0026900);
-       radeon_ring_write(rdev, 0x00000316);
-       radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
-       radeon_ring_write(rdev, 0x00000010); /*  */
+       radeon_ring_write(ring, 0xc0026900);
+       radeon_ring_write(ring, 0x00000316);
+       radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+       radeon_ring_write(ring, 0x00000010); /*  */
 
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
 
        /* XXX init other rings */
 
@@ -1115,11 +1196,12 @@ static int cayman_cp_start(struct radeon_device *rdev)
 static void cayman_cp_fini(struct radeon_device *rdev)
 {
        cayman_cp_enable(rdev, false);
-       radeon_ring_fini(rdev);
+       radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 int cayman_cp_resume(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring;
        u32 tmp;
        u32 rb_bufsz;
        int r;
@@ -1136,7 +1218,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
        WREG32(GRBM_SOFT_RESET, 0);
        RREG32(GRBM_SOFT_RESET);
 
-       WREG32(CP_SEM_WAIT_TIMER, 0x4);
+       WREG32(CP_SEM_WAIT_TIMER, 0x0);
 
        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1145,7 +1227,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* ring 0 - compute and gfx */
        /* Set ring buffer size */
-       rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+       ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
@@ -1154,8 +1237,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
-       rdev->cp.wptr = 0;
-       WREG32(CP_RB0_WPTR, rdev->cp.wptr);
+       ring->wptr = 0;
+       WREG32(CP_RB0_WPTR, ring->wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1172,13 +1255,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
        mdelay(1);
        WREG32(CP_RB0_CNTL, tmp);
 
-       WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
+       WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
 
-       rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+       ring->rptr = RREG32(CP_RB0_RPTR);
 
        /* ring1  - compute only */
        /* Set ring buffer size */
-       rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
+       ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+       rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
@@ -1187,8 +1271,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
-       rdev->cp1.wptr = 0;
-       WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
+       ring->wptr = 0;
+       WREG32(CP_RB1_WPTR, ring->wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1197,13 +1281,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
        mdelay(1);
        WREG32(CP_RB1_CNTL, tmp);
 
-       WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
+       WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
 
-       rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
+       ring->rptr = RREG32(CP_RB1_RPTR);
 
        /* ring2 - compute only */
        /* Set ring buffer size */
-       rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
+       ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+       rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
@@ -1212,8 +1297,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
-       rdev->cp2.wptr = 0;
-       WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
+       ring->wptr = 0;
+       WREG32(CP_RB2_WPTR, ring->wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1222,28 +1307,28 @@ int cayman_cp_resume(struct radeon_device *rdev)
        mdelay(1);
        WREG32(CP_RB2_CNTL, tmp);
 
-       WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
+       WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
 
-       rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
+       ring->rptr = RREG32(CP_RB2_RPTR);
 
        /* start the rings */
        cayman_cp_start(rdev);
-       rdev->cp.ready = true;
-       rdev->cp1.ready = true;
-       rdev->cp2.ready = true;
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+       rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+       rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
        /* this only tests cp0 */
-       r = radeon_ring_test(rdev);
+       r = radeon_ring_test(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
        if (r) {
-               rdev->cp.ready = false;
-               rdev->cp1.ready = false;
-               rdev->cp2.ready = false;
+               rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+               rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+               rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
                return r;
        }
 
        return 0;
 }
 
-bool cayman_gpu_is_lockup(struct radeon_device *rdev)
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        u32 srbm_status;
        u32 grbm_status;
@@ -1256,20 +1341,20 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev)
        grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
        grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
        if (!(grbm_status & GUI_ACTIVE)) {
-               r100_gpu_lockup_update(lockup, &rdev->cp);
+               r100_gpu_lockup_update(lockup, ring);
                return false;
        }
        /* force CP activities */
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (!r) {
                /* PACKET2 NOP */
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_unlock_commit(rdev);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_unlock_commit(rdev, ring);
        }
        /* XXX deal with CP0,1,2 */
-       rdev->cp.rptr = RREG32(CP_RB0_RPTR);
-       return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+       ring->rptr = RREG32(ring->rptr_reg);
+       return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }
 
 static int cayman_gpu_soft_reset(struct radeon_device *rdev)
@@ -1289,6 +1374,15 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
                RREG32(GRBM_STATUS_SE1));
        dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
                RREG32(SRBM_STATUS));
+       dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
+                RREG32(0x14F8));
+       dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+                RREG32(0x14D8));
+       dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+                RREG32(0x14FC));
+       dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+                RREG32(0x14DC));
+
        evergreen_mc_stop(rdev, &save);
        if (evergreen_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
@@ -1319,6 +1413,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
        (void)RREG32(GRBM_SOFT_RESET);
        /* Wait a little for things to settle down */
        udelay(50);
+
        dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
                RREG32(GRBM_STATUS));
        dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
@@ -1338,6 +1433,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
 
 static int cayman_startup(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
        /* enable pcie gen2 link */
@@ -1378,6 +1474,24 @@ static int cayman_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
+       r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
+       r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -1387,7 +1501,9 @@ static int cayman_startup(struct radeon_device *rdev)
        }
        evergreen_irq_set(rdev);
 
-       r = radeon_ring_init(rdev, rdev->cp.ring_size);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+                            CP_RB0_RPTR, CP_RB0_WPTR,
+                            0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
        r = cayman_cp_load_microcode(rdev);
@@ -1397,6 +1513,21 @@ static int cayman_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
+               return r;
+       }
+
+       r = radeon_vm_manager_start(rdev);
+       if (r)
+               return r;
+
        return 0;
 }
 
@@ -1411,32 +1542,26 @@ int cayman_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       rdev->accel_working = true;
        r = cayman_startup(rdev);
        if (r) {
                DRM_ERROR("cayman startup failed on resume\n");
                return r;
        }
-
-       r = r600_ib_test(rdev);
-       if (r) {
-               DRM_ERROR("radeon: failled testing IB (%d).\n", r);
-               return r;
-       }
-
        return r;
-
 }
 
 int cayman_suspend(struct radeon_device *rdev)
 {
        /* FIXME: we should wait for ring to be empty */
+       radeon_ib_pool_suspend(rdev);
+       radeon_vm_manager_suspend(rdev);
+       r600_blit_suspend(rdev);
        cayman_cp_enable(rdev, false);
-       rdev->cp.ready = false;
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        cayman_pcie_gart_disable(rdev);
-       r600_blit_suspend(rdev);
-
        return 0;
 }
 
@@ -1448,6 +1573,7 @@ int cayman_suspend(struct radeon_device *rdev)
  */
 int cayman_init(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
        /* This doesn't do much */
@@ -1500,8 +1626,8 @@ int cayman_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       rdev->cp.ring_obj = NULL;
-       r600_ring_init(rdev, 1024 * 1024);
+       ring->ring_obj = NULL;
+       r600_ring_init(rdev, ring, 1024 * 1024);
 
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
@@ -1510,29 +1636,29 @@ int cayman_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+       r = radeon_vm_manager_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+       }
+
        r = cayman_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                cayman_cp_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
+               r100_ib_fini(rdev);
+               radeon_vm_manager_fini(rdev);
                radeon_irq_kms_fini(rdev);
                cayman_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
-       if (rdev->accel_working) {
-               r = radeon_ib_pool_init(rdev);
-               if (r) {
-                       DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-                       rdev->accel_working = false;
-               }
-               r = r600_ib_test(rdev);
-               if (r) {
-                       DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-                       rdev->accel_working = false;
-               }
-       }
 
        /* Don't start up if the MC ucode is missing.
         * The default clocks and voltages before the MC ucode
@@ -1552,11 +1678,13 @@ void cayman_fini(struct radeon_device *rdev)
        cayman_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
-       radeon_ib_pool_fini(rdev);
+       radeon_vm_manager_fini(rdev);
+       r100_ib_fini(rdev);
        radeon_irq_kms_fini(rdev);
        cayman_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
+       radeon_semaphore_driver_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
@@ -1564,3 +1692,84 @@ void cayman_fini(struct radeon_device *rdev)
        rdev->bios = NULL;
 }
 
+/*
+ * vm
+ */
+int cayman_vm_init(struct radeon_device *rdev)
+{
+       /* number of VMs */
+       rdev->vm_manager.nvm = 8;
+       /* base offset of vram pages */
+       rdev->vm_manager.vram_base_offset = 0;
+       return 0;
+}
+
+void cayman_vm_fini(struct radeon_device *rdev)
+{
+}
+
+int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
+{
+       WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
+       /* flush hdp cache */
+       WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+       /* bits 0-7 are the VM contexts0-7 */
+       WREG32(VM_INVALIDATE_REQUEST, 1 << id);
+       return 0;
+}
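/*
 * Illustrative note, not part of the patch: the per-context VM registers are
 * dword arrays indexed by context id, which is why both the GART setup above
 * and cayman_vm_bind() address them as "context 0 register + (id << 2)".
 */
static u32 example_vm_pt_base_reg(int id)
{
	/* valid ids are 0-7, matching the 8 VMs set up in cayman_vm_init() above */
	return VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2);
}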
+
+void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
+       /* flush hdp cache */
+       WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+       /* bits 0-7 are the VM contexts0-7 */
+       WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+}
+
+void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       if (vm->id == -1)
+               return;
+
+       /* flush hdp cache */
+       WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+       /* bits 0-7 are the VM contexts0-7 */
+       WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+}
+
+#define R600_PTE_VALID     (1 << 0)
+#define R600_PTE_SYSTEM    (1 << 1)
+#define R600_PTE_SNOOPED   (1 << 2)
+#define R600_PTE_READABLE  (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
+                             struct radeon_vm *vm,
+                             uint32_t flags)
+{
+       uint32_t r600_flags = 0;
+
+       r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+       r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+       r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+       if (flags & RADEON_VM_PAGE_SYSTEM) {
+               r600_flags |= R600_PTE_SYSTEM;
+               r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+       }
+       return r600_flags;
+}
+
+void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
+                       unsigned pfn, uint64_t addr, uint32_t flags)
+{
+       void __iomem *ptr = (void *)vm->pt;
+
+       addr = addr & 0xFFFFFFFFFFFFF000ULL;
+       addr |= flags;
+       writeq(addr, ptr + (pfn * 8));
+}
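cayman_vm_set_page() writes one 64-bit PTE per page: the 4 KiB-aligned physical address in the upper bits, the R600_PTE_* flags in the low bits, stored at byte offset pfn * 8 in the page table. A small sketch, illustrative only and reusing the flag conversion above, of building a single entry:

static u64 example_build_pte(struct radeon_device *rdev, struct radeon_vm *vm,
			     uint64_t addr, uint32_t radeon_flags)
{
	/* translate generic RADEON_VM_PAGE_* flags into hardware PTE bits */
	uint32_t hw_flags = cayman_vm_page_flags(rdev, vm, radeon_flags);

	/* keep the page-aligned address, OR in the flag bits */
	return (addr & 0xFFFFFFFFFFFFF000ULL) | hw_flags;
}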
index 4672869..f9df2a6 100644 (file)
@@ -42,6 +42,9 @@
 #define CAYMAN_MAX_TCC_MASK          0xFF
 
 #define DMIF_ADDR_CONFIG                               0xBD4
+#define        SRBM_GFX_CNTL                                   0x0E44
+#define                RINGID(x)                                       (((x) & 0x3) << 0)
+#define                VMID(x)                                         (((x) & 0x7) << 0)
 #define        SRBM_STATUS                                     0x0E50
 
 #define VM_CONTEXT0_REQUEST_RESPONSE                   0x1470
 #define        SCRATCH_UMSK                                    0x8540
 #define        SCRATCH_ADDR                                    0x8544
 #define        CP_SEM_WAIT_TIMER                               0x85BC
+#define        CP_COHER_CNTL2                                  0x85E8
 #define CP_ME_CNTL                                     0x86D8
 #define                CP_ME_HALT                                      (1 << 28)
 #define                CP_PFP_HALT                                     (1 << 26)
 #define        CP_RB0_RPTR_ADDR                                0xC10C
 #define        CP_RB0_RPTR_ADDR_HI                             0xC110
 #define        CP_RB0_WPTR                                     0xC114
+
+#define CP_INT_CNTL                                     0xC124
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+
 #define        CP_RB1_BASE                                     0xC180
 #define        CP_RB1_CNTL                                     0xC184
 #define        CP_RB1_RPTR_ADDR                                0xC188
 #define        CP_ME_RAM_DATA                                  0xC160
 #define        CP_DEBUG                                        0xC1FC
 
+#define VGT_EVENT_INITIATOR                             0x28a90
+#       define CACHE_FLUSH_AND_INV_EVENT_TS                     (0x14 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                        (0x16 << 0)
+
 /*
  * PM4
  */
 #define        PACKET3_DISPATCH_DIRECT                         0x15
 #define        PACKET3_DISPATCH_INDIRECT                       0x16
 #define        PACKET3_INDIRECT_BUFFER_END                     0x17
+#define        PACKET3_MODE_CONTROL                            0x18
 #define        PACKET3_SET_PREDICATION                         0x20
 #define        PACKET3_REG_RMW                                 0x21
 #define        PACKET3_COND_EXEC                               0x22
 #define                PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
 #define        PACKET3_COND_WRITE                              0x45
 #define        PACKET3_EVENT_WRITE                             0x46
+#define                EVENT_TYPE(x)                           ((x) << 0)
+#define                EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+                * 1 - ZPASS_DONE
+                * 2 - SAMPLE_PIPELINESTAT
+                * 3 - SAMPLE_STREAMOUTSTAT*
+                * 4 - *S_PARTIAL_FLUSH
+                * 5 - TS events
+                */
 #define        PACKET3_EVENT_WRITE_EOP                         0x47
+#define                DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+                * 1 - send low 32bit data
+                * 2 - send 64bit data
+                * 3 - send 64bit counter value
+                */
+#define                INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+                * 1 - interrupt only (DATA_SEL = 0)
+                * 2 - interrupt when data write is confirmed
+                */
 #define        PACKET3_EVENT_WRITE_EOS                         0x48
 #define        PACKET3_PREAMBLE_CNTL                           0x4A
 #              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
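The EVENT_TYPE/EVENT_INDEX and DATA_SEL/INT_SEL helpers added here are exactly what cayman_fence_ring_emit() uses for its end-of-pipe event. A minimal sketch, illustrative only and assuming just the defines above, of the event dword it builds:

static u32 example_eop_event_dword(void)
{
	/* CACHE_FLUSH_AND_INV_EVENT_TS is a timestamp event, hence EVENT_INDEX(5) */
	return EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5);
}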
index bfc08f6..3ec81c3 100644 (file)
@@ -667,7 +667,7 @@ int r100_irq_set(struct radeon_device *rdev)
                WREG32(R_000040_GEN_INT_CNTL, 0);
                return -EINVAL;
        }
-       if (rdev->irq.sw_int) {
+       if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
                tmp |= RADEON_SW_INT_ENABLE;
        }
        if (rdev->irq.gui_idle) {
@@ -739,7 +739,7 @@ int r100_irq_process(struct radeon_device *rdev)
        while (status) {
                /* SW interrupt */
                if (status & RADEON_SW_INT_TEST) {
-                       radeon_fence_process(rdev);
+                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                }
                /* gui idle interrupt */
                if (status & RADEON_GUI_IDLE_STAT) {
@@ -811,25 +811,36 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
 void r100_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+
        /* We have to make sure that caches are flushed before
         * the CPU might read something from VRAM. */
-       radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
-       radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
+       radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
+       radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
        /* Wait until IDLE & CLEAN */
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
-       radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
                                RADEON_HDP_READ_BUFFER_INVALIDATE);
-       radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
        /* Emit fence sequence & fire IRQ */
-       radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
-       radeon_ring_write(rdev, fence->seq);
-       radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
-       radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+       radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+       radeon_ring_write(ring, fence->seq);
+       radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+       radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
+
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+                             struct radeon_ring *ring,
+                             struct radeon_semaphore *semaphore,
+                             bool emit_wait)
+{
+       /* Unused on older asics, since we don't have semaphores or multiple rings */
+       BUG();
 }
 
 int r100_copy_blit(struct radeon_device *rdev,
@@ -838,6 +849,7 @@ int r100_copy_blit(struct radeon_device *rdev,
                   unsigned num_gpu_pages,
                   struct radeon_fence *fence)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        uint32_t cur_pages;
        uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
        uint32_t pitch;
@@ -855,7 +867,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 
        /* Ask for enough room for blit + flush + fence */
        ndw = 64 + (10 * num_loops);
-       r = radeon_ring_lock(rdev, ndw);
+       r = radeon_ring_lock(rdev, ring, ndw);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
                return -EINVAL;
@@ -869,8 +881,8 @@ int r100_copy_blit(struct radeon_device *rdev,
 
                /* pages are in Y direction - height
                   page width in X direction - width */
-               radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
-               radeon_ring_write(rdev,
+               radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
+               radeon_ring_write(ring,
                                  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
                                  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
                                  RADEON_GMC_SRC_CLIPPING |
@@ -882,26 +894,26 @@ int r100_copy_blit(struct radeon_device *rdev,
                                  RADEON_DP_SRC_SOURCE_MEMORY |
                                  RADEON_GMC_CLR_CMP_CNTL_DIS |
                                  RADEON_GMC_WR_MSK_DIS);
-               radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
-               radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
-               radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-               radeon_ring_write(rdev, 0);
-               radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-               radeon_ring_write(rdev, num_gpu_pages);
-               radeon_ring_write(rdev, num_gpu_pages);
-               radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
-       }
-       radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev,
+               radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
+               radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
+               radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+               radeon_ring_write(ring, 0);
+               radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+               radeon_ring_write(ring, num_gpu_pages);
+               radeon_ring_write(ring, num_gpu_pages);
+               radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
+       }
+       radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_HOST_IDLECLEAN |
                          RADEON_WAIT_DMA_GUI_IDLE);
        if (fence) {
                r = radeon_fence_emit(rdev, fence);
        }
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
        return r;
 }
 
@@ -922,19 +934,20 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
 
 void r100_ring_start(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (r) {
                return;
        }
-       radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+       radeon_ring_write(ring,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
 }
 
 
@@ -1035,6 +1048,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
 
 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        unsigned rb_bufsz;
        unsigned rb_blksz;
        unsigned max_fetch;
@@ -1060,7 +1074,9 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        rb_bufsz = drm_order(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
        r100_cp_load_microcode(rdev);
-       r = radeon_ring_init(rdev, ring_size);
+       r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
+                            RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
+                            0, 0x7fffff, RADEON_CP_PACKET2);
        if (r) {
                return r;
        }
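/*
 * Worked example, illustrative only: for a 1 MiB ring request,
 * ring_size / 8 = 131072 and drm_order() (a ceiling log2) returns 17, so the
 * ring is re-rounded to (1 << 18) * 4 = 1 MiB and CP_RB_CNTL is programmed
 * with rb_bufsz = 17.  A standalone sketch of the same rounding:
 */
static unsigned int example_round_ring_size(unsigned int ring_size)
{
	unsigned int rb_bufsz = drm_order(ring_size / 8);

	return (1 << (rb_bufsz + 1)) * 4;	/* power-of-two size in bytes */
}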
@@ -1069,7 +1085,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        rb_blksz = 9;
        /* cp will read 128bytes at a time (4 dwords) */
        max_fetch = 1;
-       rdev->cp.align_mask = 16 - 1;
+       ring->align_mask = 16 - 1;
        /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
        pre_write_timer = 64;
        /* Force CP_RB_WPTR write if written more than one time before the
@@ -1099,13 +1115,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
 
        /* Set ring address */
-       DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
-       WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
+       DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
+       WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
        /* Force read & write ptr to 0 */
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
-       rdev->cp.wptr = 0;
-       WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+       ring->wptr = 0;
+       WREG32(RADEON_CP_RB_WPTR, ring->wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1121,7 +1137,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 
        WREG32(RADEON_CP_RB_CNTL, tmp);
        udelay(10);
-       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+       ring->rptr = RREG32(RADEON_CP_RB_RPTR);
        /* Set cp mode to bus mastering & enable cp*/
        WREG32(RADEON_CP_CSQ_MODE,
               REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1130,12 +1146,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
        WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
        radeon_ring_start(rdev);
-       r = radeon_ring_test(rdev);
+       r = radeon_ring_test(rdev, ring);
        if (r) {
                DRM_ERROR("radeon: cp isn't working (%d).\n", r);
                return r;
        }
-       rdev->cp.ready = true;
+       ring->ready = true;
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
        return 0;
 }
@@ -1147,7 +1163,7 @@ void r100_cp_fini(struct radeon_device *rdev)
        }
        /* Disable ring */
        r100_cp_disable(rdev);
-       radeon_ring_fini(rdev);
+       radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
        DRM_INFO("radeon: cp finalized\n");
 }
 
@@ -1155,7 +1171,7 @@ void r100_cp_disable(struct radeon_device *rdev)
 {
        /* Disable ring */
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-       rdev->cp.ready = false;
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        WREG32(RADEON_CP_CSQ_MODE, 0);
        WREG32(RADEON_CP_CSQ_CNTL, 0);
        WREG32(R_000770_SCRATCH_UMSK, 0);
@@ -1165,13 +1181,6 @@ void r100_cp_disable(struct radeon_device *rdev)
        }
 }
 
-void r100_cp_commit(struct radeon_device *rdev)
-{
-       WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
-       (void)RREG32(RADEON_CP_RB_WPTR);
-}
-
-
 /*
  * CS functions
  */
@@ -2099,9 +2108,9 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
        return -1;
 }
 
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
 {
-       lockup->last_cp_rptr = cp->rptr;
+       lockup->last_cp_rptr = ring->rptr;
        lockup->last_jiffies = jiffies;
 }
 
@@ -2126,20 +2135,20 @@ void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp
  * false positive when the CP is just given nothing to do.
  *
  **/
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
 {
        unsigned long cjiffies, elapsed;
 
        cjiffies = jiffies;
        if (!time_after(cjiffies, lockup->last_jiffies)) {
                /* likely a wrap around */
-               lockup->last_cp_rptr = cp->rptr;
+               lockup->last_cp_rptr = ring->rptr;
                lockup->last_jiffies = jiffies;
                return false;
        }
-       if (cp->rptr != lockup->last_cp_rptr) {
+       if (ring->rptr != lockup->last_cp_rptr) {
                /* CP is still working, no lockup */
-               lockup->last_cp_rptr = cp->rptr;
+               lockup->last_cp_rptr = ring->rptr;
                lockup->last_jiffies = jiffies;
                return false;
        }
@@ -2152,31 +2161,32 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
        return false;
 }
 
-bool r100_gpu_is_lockup(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        u32 rbbm_status;
        int r;
 
        rbbm_status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
-               r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+               r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
                return false;
        }
        /* force CP activities */
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (!r) {
                /* PACKET2 NOP */
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_unlock_commit(rdev);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_unlock_commit(rdev, ring);
        }
-       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-       return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
+       ring->rptr = RREG32(ring->rptr_reg);
+       return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
 }
 
 void r100_bm_disable(struct radeon_device *rdev)
 {
        u32 tmp;
+       u16 tmp16;
 
        /* disable bus mastering */
        tmp = RREG32(R_000030_BUS_CNTL);
@@ -2187,8 +2197,8 @@ void r100_bm_disable(struct radeon_device *rdev)
        WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
        tmp = RREG32(RADEON_BUS_CNTL);
        mdelay(1);
-       pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
-       pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+       pci_read_config_word(rdev->pdev, 0x4, &tmp16);
+       pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
        mdelay(1);
 }
 
@@ -2579,21 +2589,22 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        uint32_t rdp, wdp;
        unsigned count, i, j;
 
-       radeon_ring_free_size(rdev);
+       radeon_ring_free_size(rdev, ring);
        rdp = RREG32(RADEON_CP_RB_RPTR);
        wdp = RREG32(RADEON_CP_RB_WPTR);
-       count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+       count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
        seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
        seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
        seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
-       seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
+       seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
        for (j = 0; j <= count; j++) {
-               i = (rdp + j) & rdev->cp.ptr_mask;
-               seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+               i = (rdp + j) & ring->ptr_mask;
+               seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
        }
        return 0;
 }
@@ -3635,7 +3646,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
        }
 }
 
-int r100_ring_test(struct radeon_device *rdev)
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        uint32_t scratch;
        uint32_t tmp = 0;
@@ -3648,15 +3659,15 @@ int r100_ring_test(struct radeon_device *rdev)
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                radeon_scratch_free(rdev, scratch);
                return r;
        }
-       radeon_ring_write(rdev, PACKET0(scratch, 0));
-       radeon_ring_write(rdev, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET0(scratch, 0));
+       radeon_ring_write(ring, 0xDEADBEEF);
+       radeon_ring_unlock_commit(rdev, ring);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF) {
@@ -3677,9 +3688,11 @@ int r100_ring_test(struct radeon_device *rdev)
 
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-       radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
-       radeon_ring_write(rdev, ib->gpu_addr);
-       radeon_ring_write(rdev, ib->length_dw);
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+       radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
+       radeon_ring_write(ring, ib->gpu_addr);
+       radeon_ring_write(ring, ib->length_dw);
 }
 
 int r100_ib_test(struct radeon_device *rdev)
@@ -3696,7 +3709,7 @@ int r100_ib_test(struct radeon_device *rdev)
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
-       r = radeon_ib_get(rdev, &ib);
+       r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
        if (r) {
                return r;
        }
@@ -3740,34 +3753,16 @@ int r100_ib_test(struct radeon_device *rdev)
 
 void r100_ib_fini(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        radeon_ib_pool_fini(rdev);
 }
 
-int r100_ib_init(struct radeon_device *rdev)
-{
-       int r;
-
-       r = radeon_ib_pool_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
-               r100_ib_fini(rdev);
-               return r;
-       }
-       r = r100_ib_test(rdev);
-       if (r) {
-               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
-               r100_ib_fini(rdev);
-               return r;
-       }
-       return 0;
-}
-
 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
 {
        /* Shut down the CP. We shouldn't need to do that, but better safe than
         * sorry.
         */
-       rdev->cp.ready = false;
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        WREG32(R_000740_CP_CSQ_CNTL, 0);
 
        /* Save few CRTC registers */
@@ -3905,6 +3900,12 @@ static int r100_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -3914,11 +3915,18 @@ static int r100_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
+
        return 0;
 }
 
@@ -3941,11 +3949,14 @@ int r100_resume(struct radeon_device *rdev)
        r100_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return r100_startup(rdev);
 }
 
 int r100_suspend(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
@@ -4064,7 +4075,14 @@ int r100_init(struct radeon_device *rdev)
                        return r;
        }
        r100_set_safe_registers(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = r100_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
index a1f3ba0..eba4cbf 100644 (file)
@@ -87,6 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
                  unsigned num_gpu_pages,
                  struct radeon_fence *fence)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        uint32_t size;
        uint32_t cur_size;
        int i, num_loops;
@@ -95,33 +96,33 @@ int r200_copy_dma(struct radeon_device *rdev,
        /* radeon pitch is /64 */
        size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
        num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
-       r = radeon_ring_lock(rdev, num_loops * 4 + 64);
+       r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }
        /* Must wait for 2D idle & clean before DMA or hangs might happen */
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev, (1 << 16));
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring, (1 << 16));
        for (i = 0; i < num_loops; i++) {
                cur_size = size;
                if (cur_size > 0x1FFFFF) {
                        cur_size = 0x1FFFFF;
                }
                size -= cur_size;
-               radeon_ring_write(rdev, PACKET0(0x720, 2));
-               radeon_ring_write(rdev, src_offset);
-               radeon_ring_write(rdev, dst_offset);
-               radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
+               radeon_ring_write(ring, PACKET0(0x720, 2));
+               radeon_ring_write(ring, src_offset);
+               radeon_ring_write(ring, dst_offset);
+               radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
                src_offset += cur_size;
                dst_offset += cur_size;
        }
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
        if (fence) {
                r = radeon_fence_emit(rdev, fence);
        }
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
        return r;
 }
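r200_copy_dma() above splits the copy into DMA packets of at most 0x1FFFFF bytes each and reserves four ring dwords per packet plus fixed slack, hence the radeon_ring_lock(rdev, ring, num_loops * 4 + 64). A small sketch, illustrative only, of that ring-space budget:

static unsigned int example_dma_ring_dwords(unsigned int num_gpu_pages)
{
	uint32_t size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;	/* bytes to copy */
	int num_loops = DIV_ROUND_UP(size, 0x1FFFFF);		/* ~2 MiB per packet */

	return num_loops * 4 + 64;	/* 4 dwords per DMA packet + overhead */
}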
 
index c93bc64..3fc0d29 100644 (file)
@@ -175,37 +175,40 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
 void r300_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+
        /* Whoever calls radeon_fence_emit should call ring_lock and ask
         * for enough space (today callers are ib schedule and buffer move) */
        /* Write SC register so SC & US assert idle */
-       radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
+       radeon_ring_write(ring, 0);
        /* Flush 3D cache */
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_ZC_FLUSH);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_ZC_FLUSH);
        /* Wait until IDLE & CLEAN */
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
                                 RADEON_WAIT_2D_IDLECLEAN |
                                 RADEON_WAIT_DMA_GUI_IDLE));
-       radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
                                RADEON_HDP_READ_BUFFER_INVALIDATE);
-       radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-       radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
+       radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+       radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
        /* Emit fence sequence & fire IRQ */
-       radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
-       radeon_ring_write(rdev, fence->seq);
-       radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
-       radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+       radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+       radeon_ring_write(ring, fence->seq);
+       radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+       radeon_ring_write(ring, RADEON_SW_INT_FIRE);
 }
 
 void r300_ring_start(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        unsigned gb_tile_config;
        int r;
 
@@ -227,44 +230,44 @@ void r300_ring_start(struct radeon_device *rdev)
                break;
        }
 
-       r = radeon_ring_lock(rdev, 64);
+       r = radeon_ring_lock(rdev, ring, 64);
        if (r) {
                return;
        }
-       radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+       radeon_ring_write(ring,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
-       radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
-       radeon_ring_write(rdev, gb_tile_config);
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
+       radeon_ring_write(ring, gb_tile_config);
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
-       radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
-       radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
-       radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
-       radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+       radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+       radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+       radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+       radeon_ring_write(ring,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
-       radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
-       radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+       radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
+       radeon_ring_write(ring,
                          ((6 << R300_MS_X0_SHIFT) |
                           (6 << R300_MS_Y0_SHIFT) |
                           (6 << R300_MS_X1_SHIFT) |
@@ -273,8 +276,8 @@ void r300_ring_start(struct radeon_device *rdev)
                           (6 << R300_MS_Y2_SHIFT) |
                           (6 << R300_MSBD0_Y_SHIFT) |
                           (6 << R300_MSBD0_X_SHIFT)));
-       radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
+       radeon_ring_write(ring,
                          ((6 << R300_MS_X3_SHIFT) |
                           (6 << R300_MS_Y3_SHIFT) |
                           (6 << R300_MS_X4_SHIFT) |
@@ -282,16 +285,16 @@ void r300_ring_start(struct radeon_device *rdev)
                           (6 << R300_MS_X5_SHIFT) |
                           (6 << R300_MS_Y5_SHIFT) |
                           (6 << R300_MSBD1_SHIFT)));
-       radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
-       radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
-       radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
+       radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+       radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
+       radeon_ring_write(ring,
                          R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
-       radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
+       radeon_ring_write(ring,
                          R300_GEOMETRY_ROUND_NEAREST |
                          R300_COLOR_ROUND_NEAREST);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
 }
 
 void r300_errata(struct radeon_device *rdev)
@@ -375,26 +378,26 @@ void r300_gpu_init(struct radeon_device *rdev)
                 rdev->num_gb_pipes, rdev->num_z_pipes);
 }
 
-bool r300_gpu_is_lockup(struct radeon_device *rdev)
+bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        u32 rbbm_status;
        int r;
 
        rbbm_status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
-               r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+               r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
                return false;
        }
        /* force CP activities */
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (!r) {
                /* PACKET2 NOP */
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_unlock_commit(rdev);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_unlock_commit(rdev, ring);
        }
-       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-       return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+       ring->rptr = RREG32(RADEON_CP_RB_RPTR);
+       return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
 }
 
 int r300_asic_reset(struct radeon_device *rdev)
@@ -701,7 +704,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
 
-               if (p->keep_tiling_flags) {
+               if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
                        ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
                                  ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
                } else {
@@ -765,7 +768,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                /* RB3D_COLORPITCH1 */
                /* RB3D_COLORPITCH2 */
                /* RB3D_COLORPITCH3 */
-               if (!p->keep_tiling_flags) {
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -850,7 +853,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                break;
        case 0x4F24:
                /* ZB_DEPTHPITCH */
-               if (!p->keep_tiling_flags) {
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -1396,6 +1399,12 @@ static int r300_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -1405,11 +1414,18 @@ static int r300_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
+
        return 0;
 }
 
@@ -1434,11 +1450,14 @@ int r300_resume(struct radeon_device *rdev)
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return r300_startup(rdev);
 }
 
 int r300_suspend(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
@@ -1539,7 +1558,14 @@ int r300_init(struct radeon_device *rdev)
                        return r;
        }
        r300_set_reg_safe(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = r300_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
index 417fab8..666e28f 100644 (file)
@@ -199,6 +199,8 @@ static void r420_clock_resume(struct radeon_device *rdev)
 
 static void r420_cp_errata_init(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
        /* RV410 and R420 can lock up if CP DMA to host memory happens
         * while the 2D engine is busy.
         *
@@ -206,22 +208,24 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
         * of the CP init, apparently.
         */
        radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
-       radeon_ring_lock(rdev, 8);
-       radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
-       radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
-       radeon_ring_write(rdev, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_lock(rdev, ring, 8);
+       radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
+       radeon_ring_write(ring, rdev->config.r300.resync_scratch);
+       radeon_ring_write(ring, 0xDEADBEEF);
+       radeon_ring_unlock_commit(rdev, ring);
 }
 
 static void r420_cp_errata_fini(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
        /* Catch the RESYNC we dispatched all the way back,
         * at the very beginning of the CP init.
         */
-       radeon_ring_lock(rdev, 8);
-       radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_lock(rdev, ring, 8);
+       radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, R300_RB3D_DC_FINISH);
+       radeon_ring_unlock_commit(rdev, ring);
        radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
 }
 
@@ -254,6 +258,12 @@ static int r420_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -264,11 +274,18 @@ static int r420_startup(struct radeon_device *rdev)
                return r;
        }
        r420_cp_errata_init(rdev);
-       r = r100_ib_init(rdev);
+
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
+
        return 0;
 }
 
@@ -297,11 +314,14 @@ int r420_resume(struct radeon_device *rdev)
        r420_clock_resume(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return r420_startup(rdev);
 }
 
 int r420_suspend(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        r420_cp_errata_fini(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
@@ -414,7 +434,14 @@ int r420_init(struct radeon_device *rdev)
                        return r;
        }
        r420_set_reg_safe(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = r420_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
index fc43705..3bd8f1b 100644 (file)
 
 #define AVIVO_TMDSA_CNTL                    0x7880
 #   define AVIVO_TMDSA_CNTL_ENABLE               (1 << 0)
+#   define AVIVO_TMDSA_CNTL_HDMI_EN              (1 << 2)
 #   define AVIVO_TMDSA_CNTL_HPD_MASK             (1 << 4)
 #   define AVIVO_TMDSA_CNTL_HPD_SELECT           (1 << 8)
 #   define AVIVO_TMDSA_CNTL_SYNC_PHASE           (1 << 12)
 
 #define AVIVO_LVTMA_CNTL                                       0x7a80
 #   define AVIVO_LVTMA_CNTL_ENABLE               (1 << 0)
+#   define AVIVO_LVTMA_CNTL_HDMI_EN              (1 << 2)
 #   define AVIVO_LVTMA_CNTL_HPD_MASK             (1 << 4)
 #   define AVIVO_LVTMA_CNTL_HPD_SELECT           (1 << 8)
 #   define AVIVO_LVTMA_CNTL_SYNC_PHASE           (1 << 12)
index 3081d07..4ae1615 100644 (file)
@@ -187,6 +187,12 @@ static int r520_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -196,9 +202,15 @@ static int r520_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
        return 0;
@@ -223,6 +235,8 @@ int r520_resume(struct radeon_device *rdev)
        rv515_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return r520_startup(rdev);
 }
 
@@ -292,7 +306,14 @@ int r520_init(struct radeon_device *rdev)
        if (r)
                return r;
        rv515_set_safe_registers(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = r520_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
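
The r300, r420 and r520 hunks above all converge on the same accel bring-up order: the IB pool is created once in *_init(), the per-ring fence state is set up in *_startup() before the CP runs, the pool is started and a test IB is submitted after the CP is up, and *_suspend() stops the pool before tearing the CP down. A rough outline of that ordering, using a hypothetical wrapper name; the calls and their sequence are taken from the hunks above, everything elided is marked as such.

        /* sketch: startup-side accel ordering shared by r300/r420/r520 */
        static int rxxx_startup_accel(struct radeon_device *rdev)
        {
                int r;

                /* per-ring fence state must exist before the CP is started */
                r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
                if (r) {
                        dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
                        return r;
                }
                /* ... IRQ enable and CP init unchanged from before ... */
                r = radeon_ib_pool_start(rdev); /* pool was created once in *_init() */
                if (r)
                        return r;
                r = r100_ib_test(rdev);         /* the IB test that replaces r100_ib_init() */
                if (r) {
                        dev_err(rdev->dev, "failed testing IB (%d).\n", r);
                        rdev->accel_working = false;
                }
                return r;
        }
        /* *_suspend() now calls radeon_ib_pool_suspend(rdev) before stopping the CP,
         * and *_resume() sets rdev->accel_working = true before re-running startup. */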
index 9cdda0b..4f08e5e 100644 (file)
@@ -1344,7 +1344,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
        return 0;
 }
 
-bool r600_gpu_is_lockup(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        u32 srbm_status;
        u32 grbm_status;
@@ -1361,19 +1361,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
        grbm_status = RREG32(R_008010_GRBM_STATUS);
        grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
        if (!G_008010_GUI_ACTIVE(grbm_status)) {
-               r100_gpu_lockup_update(lockup, &rdev->cp);
+               r100_gpu_lockup_update(lockup, ring);
                return false;
        }
        /* force CP activities */
-       r = radeon_ring_lock(rdev, 2);
+       r = radeon_ring_lock(rdev, ring, 2);
        if (!r) {
                /* PACKET2 NOP */
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_write(rdev, 0x80000000);
-               radeon_ring_unlock_commit(rdev);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_write(ring, 0x80000000);
+               radeon_ring_unlock_commit(rdev, ring);
        }
-       rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
-       return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+       ring->rptr = RREG32(ring->rptr_reg);
+       return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }
 
 int r600_asic_reset(struct radeon_device *rdev)
@@ -2144,27 +2144,28 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
 
 int r600_cp_start(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
        uint32_t cp_me;
 
-       r = radeon_ring_lock(rdev, 7);
+       r = radeon_ring_lock(rdev, ring, 7);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
-       radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
-       radeon_ring_write(rdev, 0x1);
+       radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+       radeon_ring_write(ring, 0x1);
        if (rdev->family >= CHIP_RV770) {
-               radeon_ring_write(rdev, 0x0);
-               radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+               radeon_ring_write(ring, 0x0);
+               radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
        } else {
-               radeon_ring_write(rdev, 0x3);
-               radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
+               radeon_ring_write(ring, 0x3);
+               radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
        }
-       radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_unlock_commit(rdev, ring);
 
        cp_me = 0xff;
        WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2173,6 +2174,7 @@ int r600_cp_start(struct radeon_device *rdev)
 
 int r600_cp_resume(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 tmp;
        u32 rb_bufsz;
        int r;
@@ -2184,13 +2186,13 @@ int r600_cp_resume(struct radeon_device *rdev)
        WREG32(GRBM_SOFT_RESET, 0);
 
        /* Set ring buffer size */
-       rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+       rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
        WREG32(CP_RB_CNTL, tmp);
-       WREG32(CP_SEM_WAIT_TIMER, 0x4);
+       WREG32(CP_SEM_WAIT_TIMER, 0x0);
 
        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);
@@ -2198,8 +2200,8 @@ int r600_cp_resume(struct radeon_device *rdev)
        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
-       rdev->cp.wptr = 0;
-       WREG32(CP_RB_WPTR, rdev->cp.wptr);
+       ring->wptr = 0;
+       WREG32(CP_RB_WPTR, ring->wptr);
 
        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB_RPTR_ADDR,
@@ -2217,42 +2219,36 @@ int r600_cp_resume(struct radeon_device *rdev)
        mdelay(1);
        WREG32(CP_RB_CNTL, tmp);
 
-       WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+       WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
        WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
-       rdev->cp.rptr = RREG32(CP_RB_RPTR);
+       ring->rptr = RREG32(CP_RB_RPTR);
 
        r600_cp_start(rdev);
-       rdev->cp.ready = true;
-       r = radeon_ring_test(rdev);
+       ring->ready = true;
+       r = radeon_ring_test(rdev, ring);
        if (r) {
-               rdev->cp.ready = false;
+               ring->ready = false;
                return r;
        }
        return 0;
 }
 
-void r600_cp_commit(struct radeon_device *rdev)
-{
-       WREG32(CP_RB_WPTR, rdev->cp.wptr);
-       (void)RREG32(CP_RB_WPTR);
-}
-
-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
 {
        u32 rb_bufsz;
 
        /* Align ring size */
        rb_bufsz = drm_order(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
-       rdev->cp.ring_size = ring_size;
-       rdev->cp.align_mask = 16 - 1;
+       ring->ring_size = ring_size;
+       ring->align_mask = 16 - 1;
 }
 
 void r600_cp_fini(struct radeon_device *rdev)
 {
        r600_cp_stop(rdev);
-       radeon_ring_fini(rdev);
+       radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 
@@ -2271,11 +2267,11 @@ void r600_scratch_init(struct radeon_device *rdev)
        }
 }
 
-int r600_ring_test(struct radeon_device *rdev)
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        uint32_t scratch;
        uint32_t tmp = 0;
-       unsigned i;
+       unsigned i, ridx = radeon_ring_index(rdev, ring);
        int r;
 
        r = radeon_scratch_get(rdev, &scratch);
@@ -2284,16 +2280,16 @@ int r600_ring_test(struct radeon_device *rdev)
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
-       r = radeon_ring_lock(rdev, 3);
+       r = radeon_ring_lock(rdev, ring, 3);
        if (r) {
-               DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+               DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
                radeon_scratch_free(rdev, scratch);
                return r;
        }
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-       radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
-       radeon_ring_write(rdev, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+       radeon_ring_write(ring, 0xDEADBEEF);
+       radeon_ring_unlock_commit(rdev, ring);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
@@ -2301,10 +2297,10 @@ int r600_ring_test(struct radeon_device *rdev)
                DRM_UDELAY(1);
        }
        if (i < rdev->usec_timeout) {
-               DRM_INFO("ring test succeeded in %d usecs\n", i);
+               DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
        } else {
-               DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
-                         scratch, tmp);
+               DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+                         ridx, scratch, tmp);
                r = -EINVAL;
        }
        radeon_scratch_free(rdev, scratch);
@@ -2314,49 +2310,63 @@ int r600_ring_test(struct radeon_device *rdev)
 void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+
        if (rdev->wb.use_event) {
-               u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
-                       (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+               u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
                /* flush read cache over gart */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-               radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+               radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+               radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
                                        PACKET3_VC_ACTION_ENA |
                                        PACKET3_SH_ACTION_ENA);
-               radeon_ring_write(rdev, 0xFFFFFFFF);
-               radeon_ring_write(rdev, 0);
-               radeon_ring_write(rdev, 10); /* poll interval */
+               radeon_ring_write(ring, 0xFFFFFFFF);
+               radeon_ring_write(ring, 0);
+               radeon_ring_write(ring, 10); /* poll interval */
                /* EVENT_WRITE_EOP - flush caches, send int */
-               radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
-               radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
-               radeon_ring_write(rdev, addr & 0xffffffff);
-               radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
-               radeon_ring_write(rdev, fence->seq);
-               radeon_ring_write(rdev, 0);
+               radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+               radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+               radeon_ring_write(ring, addr & 0xffffffff);
+               radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+               radeon_ring_write(ring, fence->seq);
+               radeon_ring_write(ring, 0);
        } else {
                /* flush read cache over gart */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-               radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+               radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+               radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
                                        PACKET3_VC_ACTION_ENA |
                                        PACKET3_SH_ACTION_ENA);
-               radeon_ring_write(rdev, 0xFFFFFFFF);
-               radeon_ring_write(rdev, 0);
-               radeon_ring_write(rdev, 10); /* poll interval */
-               radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
-               radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+               radeon_ring_write(ring, 0xFFFFFFFF);
+               radeon_ring_write(ring, 0);
+               radeon_ring_write(ring, 10); /* poll interval */
+               radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+               radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
                /* wait for 3D idle clean */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-               radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-               radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+               radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+               radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
                /* Emit fence sequence & fire IRQ */
-               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-               radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
-               radeon_ring_write(rdev, fence->seq);
+               radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+               radeon_ring_write(ring, fence->seq);
                /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
-               radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
-               radeon_ring_write(rdev, RB_INT_STAT);
+               radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
+               radeon_ring_write(ring, RB_INT_STAT);
        }
 }
 
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+                             struct radeon_ring *ring,
+                             struct radeon_semaphore *semaphore,
+                             bool emit_wait)
+{
+       uint64_t addr = semaphore->gpu_addr;
+       unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+       radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+       radeon_ring_write(ring, addr & 0xffffffff);
+       radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+}
+
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
@@ -2409,6 +2419,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
 
 int r600_startup(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
        /* enable pcie gen2 link */
@@ -2447,6 +2458,12 @@ int r600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -2456,7 +2473,10 @@ int r600_startup(struct radeon_device *rdev)
        }
        r600_irq_set(rdev);
 
-       r = radeon_ring_init(rdev, rdev->cp.ring_size);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+                            R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+                            0, 0xfffff, RADEON_CP_PACKET2);
+
        if (r)
                return r;
        r = r600_cp_load_microcode(rdev);
@@ -2466,6 +2486,17 @@ int r600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
+               return r;
+       }
+
        return 0;
 }
 
@@ -2494,18 +2525,13 @@ int r600_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       rdev->accel_working = true;
        r = r600_startup(rdev);
        if (r) {
                DRM_ERROR("r600 startup failed on resume\n");
                return r;
        }
 
-       r = r600_ib_test(rdev);
-       if (r) {
-               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-               return r;
-       }
-
        r = r600_audio_init(rdev);
        if (r) {
                DRM_ERROR("radeon: audio resume failed\n");
@@ -2518,13 +2544,14 @@ int r600_resume(struct radeon_device *rdev)
 int r600_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
+       radeon_ib_pool_suspend(rdev);
+       r600_blit_suspend(rdev);
        /* FIXME: we should wait for ring to be empty */
        r600_cp_stop(rdev);
-       rdev->cp.ready = false;
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        r600_pcie_gart_disable(rdev);
-       r600_blit_suspend(rdev);
 
        return 0;
 }
@@ -2595,8 +2622,8 @@ int r600_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       rdev->cp.ring_obj = NULL;
-       r600_ring_init(rdev, 1024 * 1024);
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
@@ -2605,30 +2632,24 @@ int r600_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = r600_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r600_cp_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
+               r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                r600_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
-       if (rdev->accel_working) {
-               r = radeon_ib_pool_init(rdev);
-               if (r) {
-                       dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
-                       rdev->accel_working = false;
-               } else {
-                       r = r600_ib_test(rdev);
-                       if (r) {
-                               dev_err(rdev->dev, "IB test failed (%d).\n", r);
-                               rdev->accel_working = false;
-                       }
-               }
-       }
 
        r = r600_audio_init(rdev);
        if (r)
@@ -2643,12 +2664,13 @@ void r600_fini(struct radeon_device *rdev)
        r600_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
-       radeon_ib_pool_fini(rdev);
+       r100_ib_fini(rdev);
        radeon_irq_kms_fini(rdev);
        r600_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_gem_fini(rdev);
+       radeon_semaphore_driver_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
@@ -2662,18 +2684,20 @@ void r600_fini(struct radeon_device *rdev)
  */
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
+       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
        /* FIXME: implement */
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
                          (2 << 0) |
 #endif
                          (ib->gpu_addr & 0xFFFFFFFC));
-       radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
-       radeon_ring_write(rdev, ib->length_dw);
+       radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+       radeon_ring_write(ring, ib->length_dw);
 }
 
-int r600_ib_test(struct radeon_device *rdev)
+int r600_ib_test(struct radeon_device *rdev, int ring)
 {
        struct radeon_ib *ib;
        uint32_t scratch;
@@ -2687,7 +2711,7 @@ int r600_ib_test(struct radeon_device *rdev)
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
-       r = radeon_ib_get(rdev, &ib);
+       r = radeon_ib_get(rdev, ring, &ib, 256);
        if (r) {
                DRM_ERROR("radeon: failed to get ib (%d).\n", r);
                return r;
@@ -2728,7 +2752,7 @@ int r600_ib_test(struct radeon_device *rdev)
                DRM_UDELAY(1);
        }
        if (i < rdev->usec_timeout) {
-               DRM_INFO("ib test succeeded in %u usecs\n", i);
+               DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
        } else {
                DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
@@ -3075,7 +3099,7 @@ int r600_irq_set(struct radeon_device *rdev)
                hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
        }
 
-       if (rdev->irq.sw_int) {
+       if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
                DRM_DEBUG("r600_irq_set: sw int\n");
                cp_int_cntl |= RB_INT_ENABLE;
                cp_int_cntl |= TIME_STAMP_INT_ENABLE;
@@ -3459,11 +3483,11 @@ restart_ih:
                case 177: /* CP_INT in IB1 */
                case 178: /* CP_INT in IB2 */
                        DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
-                       radeon_fence_process(rdev);
+                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
                case 181: /* CP EOP event */
                        DRM_DEBUG("IH: CP EOP\n");
-                       radeon_fence_process(rdev);
+                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: GUI idle\n");
@@ -3496,30 +3520,6 @@ restart_ih:
  */
 #if defined(CONFIG_DEBUG_FS)
 
-static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
-{
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct radeon_device *rdev = dev->dev_private;
-       unsigned count, i, j;
-
-       radeon_ring_free_size(rdev);
-       count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
-       seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
-       seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
-       seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
-       seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
-       seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
-       seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
-       seq_printf(m, "%u dwords in ring\n", count);
-       i = rdev->cp.rptr;
-       for (j = 0; j <= count; j++) {
-               seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
-               i = (i + 1) & rdev->cp.ptr_mask;
-       }
-       return 0;
-}
-
 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -3533,7 +3533,6 @@ static int r600_debugfs_mc_info(struct seq_file *m, void *data)
 
 static struct drm_info_list r600_mc_info_list[] = {
        {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
-       {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
 };
 #endif
 
index 846fae5..ba66f30 100644 (file)
@@ -36,7 +36,7 @@
  */
 static int r600_audio_chipset_supported(struct radeon_device *rdev)
 {
-       return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
+       return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
                || rdev->family == CHIP_RS600
                || rdev->family == CHIP_RS690
                || rdev->family == CHIP_RS740;
@@ -161,8 +161,18 @@ static void r600_audio_update_hdmi(unsigned long param)
  */
 static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
 {
+       u32 value = 0;
        DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
-       WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
+       if (ASIC_IS_DCE4(rdev)) {
+               if (enable) {
+                       value |= 0x81000000; /* Required to enable audio */
+                       value |= 0x0e1000f0; /* fglrx sets that too */
+               }
+               WREG32(EVERGREEN_AUDIO_ENABLE, value);
+       } else {
+               WREG32_P(R600_AUDIO_ENABLE,
+                        enable ? 0x81000000 : 0x0, ~0x81000000);
+       }
        rdev->audio_enabled = enable;
 }
 
@@ -248,22 +258,33 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
                return;
        }
 
-       switch (dig->dig_encoder) {
-       case 0:
-               WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
-               WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
-               WREG32(R600_AUDIO_CLK_SRCSEL, 0);
-               break;
-
-       case 1:
-               WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
-               WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
-               WREG32(R600_AUDIO_CLK_SRCSEL, 1);
-               break;
-       default:
-               dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
-                         radeon_encoder->encoder_id);
-               return;
+       if (ASIC_IS_DCE4(rdev)) {
+               /* TODO: other PLLs? */
+               WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
+               WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
+               WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
+
+               /* Some magic trigger or src sel? */
+               WREG32_P(0x5ac, 0x01, ~0x77);
+       } else {
+               switch (dig->dig_encoder) {
+               case 0:
+                       WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
+                       WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
+                       WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+                       break;
+
+               case 1:
+                       WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
+                       WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
+                       WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+                       break;
+               default:
+                       dev_err(rdev->dev,
+                               "Unsupported DIG on encoder 0x%02X\n",
+                               radeon_encoder->encoder_id);
+                       return;
+               }
        }
 }
 
index e09d281..d996f43 100644 (file)
@@ -50,6 +50,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
                  int w, int h, u64 gpu_addr)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 cb_color_info;
        int pitch, slice;
 
@@ -63,38 +64,38 @@ set_render_target(struct radeon_device *rdev, int format,
        pitch = (w / 8) - 1;
        slice = ((w * h) / 64) - 1;
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, gpu_addr >> 8);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, gpu_addr >> 8);
 
        if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
-               radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
-               radeon_ring_write(rdev, 2 << 0);
+               radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
+               radeon_ring_write(ring, 2 << 0);
        }
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, (pitch << 0) | (slice << 10));
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, (pitch << 0) | (slice << 10));
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 0);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, cb_color_info);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, cb_color_info);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 0);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 0);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 0);
 }
 
 /* emits 5dw */
@@ -103,6 +104,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
                    u32 sync_type, u32 size,
                    u64 mc_addr)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 cp_coher_size;
 
        if (size == 0xffffffff)
@@ -110,17 +112,18 @@ cp_set_surface_sync(struct radeon_device *rdev,
        else
                cp_coher_size = ((size + 255) >> 8);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-       radeon_ring_write(rdev, sync_type);
-       radeon_ring_write(rdev, cp_coher_size);
-       radeon_ring_write(rdev, mc_addr >> 8);
-       radeon_ring_write(rdev, 10); /* poll interval */
+       radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       radeon_ring_write(ring, sync_type);
+       radeon_ring_write(ring, cp_coher_size);
+       radeon_ring_write(ring, mc_addr >> 8);
+       radeon_ring_write(ring, 10); /* poll interval */
 }
 
 /* emits 21dw + 1 surface sync = 26dw */
 static void
 set_shaders(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u64 gpu_addr;
        u32 sq_pgm_resources;
 
@@ -129,35 +132,35 @@ set_shaders(struct radeon_device *rdev)
 
        /* VS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, gpu_addr >> 8);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, gpu_addr >> 8);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, sq_pgm_resources);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, sq_pgm_resources);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 0);
 
        /* PS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, gpu_addr >> 8);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, gpu_addr >> 8);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 2);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 2);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-       radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, 0);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, 0);
 
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
        cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -167,6 +170,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 sq_vtx_constant_word2;
 
        sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
@@ -175,15 +179,15 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
        sq_vtx_constant_word2 |=  SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
 #endif
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
-       radeon_ring_write(rdev, 0x460);
-       radeon_ring_write(rdev, gpu_addr & 0xffffffff);
-       radeon_ring_write(rdev, 48 - 1);
-       radeon_ring_write(rdev, sq_vtx_constant_word2);
-       radeon_ring_write(rdev, 1 << 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+       radeon_ring_write(ring, 0x460);
+       radeon_ring_write(ring, gpu_addr & 0xffffffff);
+       radeon_ring_write(ring, 48 - 1);
+       radeon_ring_write(ring, sq_vtx_constant_word2);
+       radeon_ring_write(ring, 1 << 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
 
        if ((rdev->family == CHIP_RV610) ||
            (rdev->family == CHIP_RV620) ||
@@ -203,6 +207,7 @@ set_tex_resource(struct radeon_device *rdev,
                 int format, int w, int h, int pitch,
                 u64 gpu_addr, u32 size)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
 
        if (h < 1)
@@ -225,15 +230,15 @@ set_tex_resource(struct radeon_device *rdev,
        cp_set_surface_sync(rdev,
                            PACKET3_TC_ACTION_ENA, size, gpu_addr);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, sq_tex_resource_word0);
-       radeon_ring_write(rdev, sq_tex_resource_word1);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, gpu_addr >> 8);
-       radeon_ring_write(rdev, sq_tex_resource_word4);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, sq_tex_resource_word0);
+       radeon_ring_write(ring, sq_tex_resource_word1);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, gpu_addr >> 8);
+       radeon_ring_write(ring, sq_tex_resource_word4);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
 }
 
 /* emits 12 */
@@ -241,43 +246,45 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
             int x2, int y2)
 {
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
-       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+       radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+       radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+       radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 }
 
 /* emits 10 */
 static void
 draw_auto(struct radeon_device *rdev)
 {
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-       radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, DI_PT_RECTLIST);
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, DI_PT_RECTLIST);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+       radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
                          (2 << 2) |
 #endif
                          DI_INDEX_SIZE_16_BIT);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
-       radeon_ring_write(rdev, 1);
+       radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+       radeon_ring_write(ring, 1);
 
-       radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
-       radeon_ring_write(rdev, 3);
-       radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+       radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+       radeon_ring_write(ring, 3);
+       radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
 
 }
 
@@ -285,6 +292,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
        u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
        int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
@@ -440,24 +448,24 @@ set_default_state(struct radeon_device *rdev)
        /* emit an IB pointing at default state */
        dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
-       radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
                          (2 << 0) |
 #endif
                          (gpu_addr & 0xFFFFFFFC));
-       radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
-       radeon_ring_write(rdev, dwords);
+       radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+       radeon_ring_write(ring, dwords);
 
        /* SQ config */
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
-       radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, sq_config);
-       radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
-       radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
-       radeon_ring_write(rdev, sq_thread_resource_mgmt);
-       radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
-       radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
+       radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, sq_config);
+       radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+       radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+       radeon_ring_write(ring, sq_thread_resource_mgmt);
+       radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+       radeon_ring_write(ring, sq_stack_resource_mgmt_2);
 }
 
 static uint32_t i2f(uint32_t input)
@@ -611,16 +619,17 @@ void r600_blit_fini(struct radeon_device *rdev)
        radeon_bo_unref(&rdev->r600_blit.shader_obj);
 }
 
-static int r600_vb_ib_get(struct radeon_device *rdev)
+static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
 {
        int r;
-       r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
+       r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
+                         &rdev->r600_blit.vb_ib, size);
        if (r) {
                DRM_ERROR("failed to get IB for vertex buffer\n");
                return r;
        }
 
-       rdev->r600_blit.vb_total = 64*1024;
+       rdev->r600_blit.vb_total = size;
        rdev->r600_blit.vb_used = 0;
        return 0;
 }
@@ -679,15 +688,12 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
 
 int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
        int ring_size;
        int num_loops = 0;
        int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
 
-       r = r600_vb_ib_get(rdev);
-       if (r)
-               return r;
-
        /* num loops */
        while (num_gpu_pages) {
                num_gpu_pages -=
@@ -696,10 +702,15 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
                num_loops++;
        }
 
+       /* 48 bytes of vertex data per loop */
+       r = r600_vb_ib_get(rdev, (num_loops*48)+256);
+       if (r)
+               return r;
+
        /* calculate number of loops correctly */
        ring_size = num_loops * dwords_per_loop;
        ring_size += rdev->r600_blit.ring_size_common;
-       r = radeon_ring_lock(rdev, ring_size);
+       r = radeon_ring_lock(rdev, ring, ring_size);
        if (r)
                return r;
 
@@ -718,7 +729,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
        if (fence)
                r = radeon_fence_emit(rdev, fence);
 
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 void r600_kms_blit_copy(struct radeon_device *rdev,
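Editorial note, not part of the patch: the hunk above sizes the vertex-buffer IB at 48 bytes per rectangle loop plus a 256-byte margin, while the ring reservation stays at num_loops * dwords_per_loop + ring_size_common. A minimal sketch of that accounting, with a hypothetical helper name:

/* Hypothetical helper illustrating the sizing used above; the real values
 * come from rdev->r600_blit and r600_blit_create_rect(). */
static unsigned example_blit_sizes(unsigned num_loops, unsigned dwords_per_loop,
                                   unsigned ring_size_common, unsigned *vb_ib_bytes)
{
        *vb_ib_bytes = num_loops * 48 + 256;    /* vertex data per loop + slack */
        return num_loops * dwords_per_loop + ring_size_common;  /* ring dwords to lock */
}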
index c9db493..84c5462 100644 (file)
@@ -1815,7 +1815,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
                     dev_priv->ring.size_l2qw);
 #endif
 
-       RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x4);
+       RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);
 
        /* Set the write pointer delay */
        RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
index cb1acff..38ce5d0 100644 (file)
@@ -941,7 +941,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                track->db_depth_control = radeon_get_ib_value(p, idx);
                break;
        case R_028010_DB_DEPTH_INFO:
-               if (!p->keep_tiling_flags &&
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
                    r600_cs_packet_next_is_pkt3_nop(p)) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
@@ -993,7 +993,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case R_0280B4_CB_COLOR5_INFO:
        case R_0280B8_CB_COLOR6_INFO:
        case R_0280BC_CB_COLOR7_INFO:
-               if (!p->keep_tiling_flags &&
+               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
                     r600_cs_packet_next_is_pkt3_nop(p)) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
@@ -1293,7 +1293,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
        mip_offset <<= 8;
 
        word0 = radeon_get_ib_value(p, idx + 0);
-       if (!p->keep_tiling_flags) {
+       if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                if (tiling_flags & RADEON_TILING_MACRO)
                        word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
                else if (tiling_flags & RADEON_TILING_MICRO)
@@ -1625,7 +1625,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                        return -EINVAL;
                                }
                                base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-                               if (!p->keep_tiling_flags) {
+                               if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
                                        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                                                ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
                                        else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
index f5ac7e7..0b59206 100644 (file)
@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
        frame[0xD] = (right_bar >> 8);
 
        r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+       /* Our header values (type, version, length) should be fine; Intel
+        * uses the same ones. The checksum function also seems to be OK, as
+        * it works for the audio infoframe. However, the calculated value is
+        * always lower by 2 than what fglrx produces, which breaks display
+        * entirely on TVs that strictly check the checksum. Hack it manually
+        * here to work around the issue. */
+       frame[0x0] += 2;
 
        WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
                frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -313,7 +320,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
        struct radeon_device *rdev = dev->dev_private;
        uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
-       if (ASIC_IS_DCE4(rdev))
+       if (ASIC_IS_DCE5(rdev))
                return;
 
        if (!offset)
@@ -455,13 +462,31 @@ static void r600_hdmi_assign_block(struct drm_encoder *encoder)
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
+       u16 eg_offsets[] = {
+               EVERGREEN_CRTC0_REGISTER_OFFSET,
+               EVERGREEN_CRTC1_REGISTER_OFFSET,
+               EVERGREEN_CRTC2_REGISTER_OFFSET,
+               EVERGREEN_CRTC3_REGISTER_OFFSET,
+               EVERGREEN_CRTC4_REGISTER_OFFSET,
+               EVERGREEN_CRTC5_REGISTER_OFFSET,
+       };
+
        if (!dig) {
                dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
                return;
        }
 
-       if (ASIC_IS_DCE4(rdev)) {
+       if (ASIC_IS_DCE5(rdev)) {
                /* TODO */
+       } else if (ASIC_IS_DCE4(rdev)) {
+               if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
+                       dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
+                       return;
+               }
+               radeon_encoder->hdmi_offset = EVERGREEN_HDMI_BASE +
+                                               eg_offsets[dig->dig_encoder];
+               radeon_encoder->hdmi_config_offset = radeon_encoder->hdmi_offset
+                                               + EVERGREEN_HDMI_CONFIG_OFFSET;
        } else if (ASIC_IS_DCE3(rdev)) {
                radeon_encoder->hdmi_offset = dig->dig_encoder ?
                        R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
@@ -484,7 +509,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        uint32_t offset;
 
-       if (ASIC_IS_DCE4(rdev))
+       if (ASIC_IS_DCE5(rdev))
                return;
 
        if (!radeon_encoder->hdmi_offset) {
@@ -497,16 +522,24 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
        }
 
        offset = radeon_encoder->hdmi_offset;
-       if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+       if (ASIC_IS_DCE5(rdev)) {
+               /* TODO */
+       } else if (ASIC_IS_DCE4(rdev)) {
+               WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0x1, ~0x1);
+       } else if (ASIC_IS_DCE32(rdev)) {
                WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
-       } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+       } else if (ASIC_IS_DCE3(rdev)) {
+               /* TODO */
+       } else if (rdev->family >= CHIP_R600) {
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-                       WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
+                       WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
+                                ~AVIVO_TMDSA_CNTL_HDMI_EN);
                        WREG32(offset + R600_HDMI_ENABLE, 0x101);
                        break;
                case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-                       WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
+                       WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
+                                ~AVIVO_LVTMA_CNTL_HDMI_EN);
                        WREG32(offset + R600_HDMI_ENABLE, 0x105);
                        break;
                default:
@@ -518,8 +551,8 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
        if (rdev->irq.installed
            && rdev->family != CHIP_RS600
            && rdev->family != CHIP_RS690
-           && rdev->family != CHIP_RS740) {
-
+           && rdev->family != CHIP_RS740
+           && !ASIC_IS_DCE4(rdev)) {
                /* if irq is available use it */
                rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
                radeon_irq_set(rdev);
@@ -544,7 +577,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        uint32_t offset;
 
-       if (ASIC_IS_DCE4(rdev))
+       if (ASIC_IS_DCE5(rdev))
                return;
 
        offset = radeon_encoder->hdmi_offset;
@@ -563,16 +596,22 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
        /* disable polling */
        r600_audio_disable_polling(encoder);
 
-       if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+       if (ASIC_IS_DCE5(rdev)) {
+               /* TODO */
+       } else if (ASIC_IS_DCE4(rdev)) {
+               WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0, ~0x1);
+       } else if (ASIC_IS_DCE32(rdev)) {
                WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
        } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-                       WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
+                       WREG32_P(AVIVO_TMDSA_CNTL, 0,
+                                ~AVIVO_TMDSA_CNTL_HDMI_EN);
                        WREG32(offset + R600_HDMI_ENABLE, 0);
                        break;
                case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-                       WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
+                       WREG32_P(AVIVO_LVTMA_CNTL, 0,
+                                ~AVIVO_LVTMA_CNTL_HDMI_EN);
                        WREG32(offset + R600_HDMI_ENABLE, 0);
                        break;
                default:
index bfe1b5d..3ee1fd7 100644 (file)
 #define        PACKET3_STRMOUT_BUFFER_UPDATE                   0x34
 #define        PACKET3_INDIRECT_BUFFER_MP                      0x38
 #define        PACKET3_MEM_SEMAPHORE                           0x39
+#              define PACKET3_SEM_SEL_SIGNAL       (0x6 << 29)
+#              define PACKET3_SEM_SEL_WAIT         (0x7 << 29)
 #define        PACKET3_MPEG_INDEX                              0x3A
 #define        PACKET3_WAIT_REG_MEM                            0x3C
 #define        PACKET3_MEM_WRITE                               0x3D
index 8227e76..73e05cb 100644 (file)
@@ -107,6 +107,21 @@ extern int radeon_msi;
 #define RADEONFB_CONN_LIMIT            4
 #define RADEON_BIOS_NUM_SCRATCH                8
 
+/* max number of rings */
+#define RADEON_NUM_RINGS 3
+
+/* internal ring indices */
+/* r1xx+ has gfx CP ring */
+#define RADEON_RING_TYPE_GFX_INDEX  0
+
+/* cayman has 2 compute CP rings */
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
+
+/* hardcode these limits for now */
+#define RADEON_VA_RESERVED_SIZE                (8 << 20)
+#define RADEON_IB_VM_MAX_SIZE          (64 << 10)
+
 /*
  * Errata workarounds.
  */
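Editorial note, not part of the patch: with the per-ring layout introduced here, code that previously used rdev->cp indexes rdev->ring[] by one of the indices defined above. A minimal sketch, using a hypothetical helper name and the radeon_ring_free_size() declaration added later in this header:

/* Hypothetical: report how many dwords are free on the GFX ring. */
static unsigned example_gfx_ring_free_dw(struct radeon_device *rdev)
{
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

        radeon_ring_free_size(rdev, ring);      /* refreshes ring->ring_free_dw */
        return ring->ring_free_dw;
}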
@@ -192,14 +207,15 @@ extern int sumo_get_temp(struct radeon_device *rdev);
  */
 struct radeon_fence_driver {
        uint32_t                        scratch_reg;
+       uint64_t                        gpu_addr;
+       volatile uint32_t               *cpu_addr;
        atomic_t                        seq;
        uint32_t                        last_seq;
        unsigned long                   last_jiffies;
        unsigned long                   last_timeout;
        wait_queue_head_t               queue;
-       rwlock_t                        lock;
        struct list_head                created;
-       struct list_head                emited;
+       struct list_head                emitted;
        struct list_head                signaled;
        bool                            initialized;
 };
@@ -210,21 +226,26 @@ struct radeon_fence {
        struct list_head                list;
        /* protected by radeon_fence.lock */
        uint32_t                        seq;
-       bool                            emited;
+       bool                            emitted;
        bool                            signaled;
+       /* RB, DMA, etc. */
+       int                             ring;
+       struct radeon_semaphore         *semaphore;
 };
 
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
 int radeon_fence_driver_init(struct radeon_device *rdev);
 void radeon_fence_driver_fini(struct radeon_device *rdev);
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
+int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
-void radeon_fence_process(struct radeon_device *rdev);
+void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev);
-int radeon_fence_wait_last(struct radeon_device *rdev);
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
 void radeon_fence_unref(struct radeon_fence **fence);
+int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
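Editorial note, not part of the patch: a minimal sketch of the per-ring fence API declared above, with a hypothetical function name; the exact call sequence inside the driver may differ.

/* Hypothetical: create a fence on the GFX ring, emit it, then wait for it. */
static int example_fence_roundtrip(struct radeon_device *rdev)
{
        struct radeon_fence *fence = NULL;
        int r;

        r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
        if (r)
                return r;
        /* ... commands for the GFX ring would be emitted here ... */
        r = radeon_fence_emit(rdev, fence);     /* fence packet goes to fence->ring */
        if (!r)
                r = radeon_fence_wait(fence, false);    /* uninterruptible wait */
        radeon_fence_unref(&fence);
        return r;
}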
 
 /*
  * Tiling registers
@@ -246,6 +267,21 @@ struct radeon_mman {
        bool                            initialized;
 };
 
+/* bo virtual address in a specific vm */
+struct radeon_bo_va {
+       /* bo list is protected by bo being reserved */
+       struct list_head                bo_list;
+       /* vm list is protected by vm mutex */
+       struct list_head                vm_list;
+       /* constant after initialization */
+       struct radeon_vm                *vm;
+       struct radeon_bo                *bo;
+       uint64_t                        soffset;
+       uint64_t                        eoffset;
+       uint32_t                        flags;
+       bool                            valid;
+};
+
 struct radeon_bo {
        /* Protected by gem.mutex */
        struct list_head                list;
@@ -259,6 +295,10 @@ struct radeon_bo {
        u32                             tiling_flags;
        u32                             pitch;
        int                             surface_reg;
+       /* list of all virtual addresses to which
+        * this bo is associated
+        */
+       struct list_head                va;
        /* Constant after initialization */
        struct radeon_device            *rdev;
        struct drm_gem_object           gem_base;
@@ -274,6 +314,48 @@ struct radeon_bo_list {
        u32                     tiling_flags;
 };
 
+/* sub-allocation manager; it has to be protected by another lock.
+ * By design this is a helper for other parts of the driver, such as
+ * the indirect buffer or semaphore code, which each have their own
+ * locking.
+ *
+ * The principle is simple: we keep a list of sub-allocations in offset
+ * order (the first entry has offset == 0, the last entry has the
+ * highest offset).
+ *
+ * When allocating a new object we first check whether there is room at
+ * the end, i.e. total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so, we allocate the new object there.
+ *
+ * When there is not enough room at the end, we wait on each sub-object
+ * in turn until object_offset + object_size >= alloc_size; that object
+ * then becomes the sub-object we return.
+ *
+ * Alignment can't be bigger than the page size.
+ *
+ * Holes are not considered for allocation, to keep things simple. The
+ * assumption is that there won't be any holes (all objects use the
+ * same alignment).
+ */
+struct radeon_sa_manager {
+       struct radeon_bo        *bo;
+       struct list_head        sa_bo;
+       unsigned                size;
+       uint64_t                gpu_addr;
+       void                    *cpu_ptr;
+       uint32_t                domain;
+};
+
+struct radeon_sa_bo;
+
+/* sub-allocation buffer */
+struct radeon_sa_bo {
+       struct list_head                list;
+       struct radeon_sa_manager        *manager;
+       unsigned                        offset;
+       unsigned                        size;
+};
+
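Editorial note, not part of the patch: the tail-allocation rule described in the comment above can be sketched roughly as below; the helper name is hypothetical and the real implementation (including the wait-for-free path) differs in detail.

/* Hypothetical: does alloc_size fit after the last sub-allocation? */
static bool example_sa_fits_at_tail(struct radeon_sa_manager *manager,
                                    unsigned alloc_size, unsigned *offset)
{
        struct radeon_sa_bo *last;

        if (list_empty(&manager->sa_bo)) {
                *offset = 0;
                return alloc_size <= manager->size;
        }
        last = list_entry(manager->sa_bo.prev, struct radeon_sa_bo, list);
        *offset = last->offset + last->size;
        return manager->size - *offset >= alloc_size;
}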
 /*
  * GEM objects.
  */
@@ -303,6 +385,46 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
                             uint32_t handle);
 
 /*
+ * Semaphores.
+ */
+struct radeon_ring;
+
+#define        RADEON_SEMAPHORE_BO_SIZE        256
+
+struct radeon_semaphore_driver {
+       rwlock_t                        lock;
+       struct list_head                bo;
+};
+
+struct radeon_semaphore_bo;
+
+/* everything here is constant */
+struct radeon_semaphore {
+       struct list_head                list;
+       uint64_t                        gpu_addr;
+       uint32_t                        *cpu_ptr;
+       struct radeon_semaphore_bo      *bo;
+};
+
+struct radeon_semaphore_bo {
+       struct list_head                list;
+       struct radeon_ib                *ib;
+       struct list_head                free;
+       struct radeon_semaphore         semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
+       unsigned                        nused;
+};
+
+void radeon_semaphore_driver_fini(struct radeon_device *rdev);
+int radeon_semaphore_create(struct radeon_device *rdev,
+                           struct radeon_semaphore **semaphore);
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+                                 struct radeon_semaphore *semaphore);
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+                               struct radeon_semaphore *semaphore);
+void radeon_semaphore_free(struct radeon_device *rdev,
+                          struct radeon_semaphore *semaphore);
+
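Editorial note, not part of the patch: a rough sketch of how the semaphore API above can order work between two rings; the function name is hypothetical, both rings are assumed to be locked for the packets, and in real use the semaphore must stay allocated until both rings have consumed it.

/* Hypothetical: make the first Cayman compute ring wait for the GFX ring. */
static int example_sync_gfx_to_cp1(struct radeon_device *rdev)
{
        struct radeon_semaphore *sem;
        int r;

        r = radeon_semaphore_create(rdev, &sem);
        if (r)
                return r;
        radeon_semaphore_emit_signal(rdev, RADEON_RING_TYPE_GFX_INDEX, sem);
        radeon_semaphore_emit_wait(rdev, CAYMAN_RING_TYPE_CP1_INDEX, sem);
        /* call radeon_semaphore_free() only once the GPU has processed both packets */
        return 0;
}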
+/*
  * GART structures, functions & helpers
  */
 struct radeon_mc;
@@ -310,6 +432,7 @@ struct radeon_mc;
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
 #define RADEON_GPU_PAGE_SHIFT 12
+#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
 
 struct radeon_gart {
        dma_addr_t                      table_addr;
@@ -320,7 +443,6 @@ struct radeon_gart {
        unsigned                        table_size;
        struct page                     **pages;
        dma_addr_t                      *pages_addr;
-       bool                            *ttm_alloced;
        bool                            ready;
 };
 
@@ -434,7 +556,7 @@ union radeon_irq_stat_regs {
 
 struct radeon_irq {
        bool            installed;
-       bool            sw_int;
+       bool            sw_int[RADEON_NUM_RINGS];
        bool            crtc_vblank_int[RADEON_MAX_CRTCS];
        bool            pflip[RADEON_MAX_CRTCS];
        wait_queue_head_t       vblank_queue;
@@ -444,7 +566,7 @@ struct radeon_irq {
        wait_queue_head_t       idle_queue;
        bool            hdmi[RADEON_MAX_HDMI_BLOCKS];
        spinlock_t sw_lock;
-       int sw_refcount;
+       int sw_refcount[RADEON_NUM_RINGS];
        union radeon_irq_stat_regs stat_regs;
        spinlock_t pflip_lock[RADEON_MAX_CRTCS];
        int pflip_refcount[RADEON_MAX_CRTCS];
@@ -452,22 +574,23 @@ struct radeon_irq {
 
 int radeon_irq_kms_init(struct radeon_device *rdev);
 void radeon_irq_kms_fini(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
 void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
 void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
 
 /*
- * CP & ring.
+ * CP & rings.
  */
+
 struct radeon_ib {
-       struct list_head        list;
+       struct radeon_sa_bo     sa_bo;
        unsigned                idx;
+       uint32_t                length_dw;
        uint64_t                gpu_addr;
-       struct radeon_fence     *fence;
        uint32_t                *ptr;
-       uint32_t                length_dw;
-       bool                    free;
+       struct radeon_fence     *fence;
+       unsigned                vm_id;
 };
 
 /*
@@ -475,20 +598,22 @@ struct radeon_ib {
  * mutex protects scheduled_ibs, ready, alloc_bm
  */
 struct radeon_ib_pool {
-       struct mutex            mutex;
-       struct radeon_bo        *robj;
-       struct list_head        bogus_ib;
-       struct radeon_ib        ibs[RADEON_IB_POOL_SIZE];
-       bool                    ready;
-       unsigned                head_id;
+       struct mutex                    mutex;
+       struct radeon_sa_manager        sa_manager;
+       struct radeon_ib                ibs[RADEON_IB_POOL_SIZE];
+       bool                            ready;
+       unsigned                        head_id;
 };
 
-struct radeon_cp {
+struct radeon_ring {
        struct radeon_bo        *ring_obj;
        volatile uint32_t       *ring;
        unsigned                rptr;
+       unsigned                rptr_offs;
+       unsigned                rptr_reg;
        unsigned                wptr;
        unsigned                wptr_old;
+       unsigned                wptr_reg;
        unsigned                ring_size;
        unsigned                ring_free_dw;
        int                     count_dw;
@@ -497,6 +622,61 @@ struct radeon_cp {
        uint32_t                ptr_mask;
        struct mutex            mutex;
        bool                    ready;
+       u32                     ptr_reg_shift;
+       u32                     ptr_reg_mask;
+       u32                     nop;
+};
+
+/*
+ * VM
+ */
+struct radeon_vm {
+       struct list_head                list;
+       struct list_head                va;
+       int                             id;
+       unsigned                        last_pfn;
+       u64                             pt_gpu_addr;
+       u64                             *pt;
+       struct radeon_sa_bo             sa_bo;
+       struct mutex                    mutex;
+       /* last fence for cs using this vm */
+       struct radeon_fence             *fence;
+};
+
+struct radeon_vm_funcs {
+       int (*init)(struct radeon_device *rdev);
+       void (*fini)(struct radeon_device *rdev);
+       /* the cs mutex must be locked for schedule_ib */
+       int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
+       void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
+       void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
+       uint32_t (*page_flags)(struct radeon_device *rdev,
+                              struct radeon_vm *vm,
+                              uint32_t flags);
+       void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
+                       unsigned pfn, uint64_t addr, uint32_t flags);
+};
+
+struct radeon_vm_manager {
+       struct list_head                lru_vm;
+       uint32_t                        use_bitmap;
+       struct radeon_sa_manager        sa_manager;
+       uint32_t                        max_pfn;
+       /* fields constant after init */
+       const struct radeon_vm_funcs    *funcs;
+       /* number of VMIDs */
+       unsigned                        nvm;
+       /* vram base address for page table entries */
+       u64                             vram_base_offset;
+       /* is vm enabled? */
+       bool                            enabled;
+};
+
+/*
+ * file private structure
+ */
+struct radeon_fpriv {
+       struct radeon_vm                vm;
 };
 
 /*
@@ -506,6 +686,7 @@ struct r600_ih {
        struct radeon_bo        *ring_obj;
        volatile uint32_t       *ring;
        unsigned                rptr;
+       unsigned                rptr_offs;
        unsigned                wptr;
        unsigned                wptr_old;
        unsigned                ring_size;
@@ -549,23 +730,29 @@ struct r600_blit {
 
 void r600_blit_suspend(struct radeon_device *rdev);
 
-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+                 struct radeon_ib **ib, unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
+bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
+int radeon_ib_pool_start(struct radeon_device *rdev);
+int radeon_ib_pool_suspend(struct radeon_device *rdev);
 int radeon_ib_test(struct radeon_device *rdev);
-extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
 /* Ring access between begin & end cannot sleep */
-void radeon_ring_free_size(struct radeon_device *rdev);
-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev);
-void radeon_ring_unlock_commit(struct radeon_device *rdev);
-void radeon_ring_unlock_undo(struct radeon_device *rdev);
-int radeon_ring_test(struct radeon_device *rdev);
-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
-void radeon_ring_fini(struct radeon_device *rdev);
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
+                    unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+                    u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
 
 
 /*
@@ -582,12 +769,12 @@ struct radeon_cs_reloc {
 struct radeon_cs_chunk {
        uint32_t                chunk_id;
        uint32_t                length_dw;
-       int kpage_idx[2];
-       uint32_t                *kpage[2];
+       int                     kpage_idx[2];
+       uint32_t                *kpage[2];
        uint32_t                *kdata;
-       void __user *user_ptr;
-       int last_copied_page;
-       int last_page_index;
+       void __user             *user_ptr;
+       int                     last_copied_page;
+       int                     last_page_index;
 };
 
 struct radeon_cs_parser {
@@ -605,14 +792,18 @@ struct radeon_cs_parser {
        struct radeon_cs_reloc  *relocs;
        struct radeon_cs_reloc  **relocs_ptr;
        struct list_head        validated;
+       bool                    sync_to_ring[RADEON_NUM_RINGS];
        /* indices of various chunks */
        int                     chunk_ib_idx;
        int                     chunk_relocs_idx;
+       int                     chunk_flags_idx;
        struct radeon_ib        *ib;
        void                    *track;
        unsigned                family;
        int                     parser_error;
-       bool                    keep_tiling_flags;
+       u32                     cs_flags;
+       u32                     ring;
+       s32                     priority;
 };
 
 extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
@@ -869,11 +1060,20 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number);
  * Testing
  */
 void radeon_test_moves(struct radeon_device *rdev);
+void radeon_test_ring_sync(struct radeon_device *rdev,
+                          struct radeon_ring *cpA,
+                          struct radeon_ring *cpB);
+void radeon_test_syncing(struct radeon_device *rdev);
 
 
 /*
  * Debugfs
  */
+struct radeon_debugfs {
+       struct drm_info_list    *files;
+       unsigned                num_files;
+};
+
 int radeon_debugfs_add_files(struct radeon_device *rdev,
                             struct drm_info_list *files,
                             unsigned nfiles);
@@ -889,21 +1089,27 @@ struct radeon_asic {
        int (*resume)(struct radeon_device *rdev);
        int (*suspend)(struct radeon_device *rdev);
        void (*vga_set_state)(struct radeon_device *rdev, bool state);
-       bool (*gpu_is_lockup)(struct radeon_device *rdev);
+       bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
        int (*asic_reset)(struct radeon_device *rdev);
        void (*gart_tlb_flush)(struct radeon_device *rdev);
        int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
        int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
        void (*cp_fini)(struct radeon_device *rdev);
        void (*cp_disable)(struct radeon_device *rdev);
-       void (*cp_commit)(struct radeon_device *rdev);
        void (*ring_start)(struct radeon_device *rdev);
-       int (*ring_test)(struct radeon_device *rdev);
-       void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+
+       struct {
+               void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+               int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+               void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+               void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+                                      struct radeon_semaphore *semaphore, bool emit_wait);
+       } ring[RADEON_NUM_RINGS];
+
+       int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
        int (*irq_set)(struct radeon_device *rdev);
        int (*irq_process)(struct radeon_device *rdev);
        u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
-       void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
        int (*cs_parse)(struct radeon_cs_parser *p);
        int (*copy_blit)(struct radeon_device *rdev,
                         uint64_t src_offset,
@@ -1132,6 +1338,8 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp);
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp);
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *filp);
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp);
@@ -1231,11 +1439,10 @@ struct radeon_device {
        struct radeon_mode_info         mode_info;
        struct radeon_scratch           scratch;
        struct radeon_mman              mman;
-       struct radeon_fence_driver      fence_drv;
-       struct radeon_cp                cp;
-       /* cayman compute rings */
-       struct radeon_cp                cp1;
-       struct radeon_cp                cp2;
+       rwlock_t                        fence_lock;
+       struct radeon_fence_driver      fence_drv[RADEON_NUM_RINGS];
+       struct radeon_semaphore_driver  semaphore_drv;
+       struct radeon_ring              ring[RADEON_NUM_RINGS];
        struct radeon_ib_pool           ib_pool;
        struct radeon_irq               irq;
        struct radeon_asic              *asic;
@@ -1279,6 +1486,13 @@ struct radeon_device {
        struct drm_file *cmask_filp;
        /* i2c buses */
        struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
+       /* debugfs */
+       struct radeon_debugfs   debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
+       unsigned                debugfs_count;
+       /* virtual memory */
+       struct radeon_vm_manager        vm_manager;
+       /* ring used for bo copies */
+       u32                             copy_ring;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -1414,18 +1628,17 @@ void radeon_atombios_fini(struct radeon_device *rdev);
 /*
  * RING helpers.
  */
-
 #if DRM_DEBUG_CODE == 0
-static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 {
-       rdev->cp.ring[rdev->cp.wptr++] = v;
-       rdev->cp.wptr &= rdev->cp.ptr_mask;
-       rdev->cp.count_dw--;
-       rdev->cp.ring_free_dw--;
+       ring->ring[ring->wptr++] = v;
+       ring->wptr &= ring->ptr_mask;
+       ring->count_dw--;
+       ring->ring_free_dw--;
 }
 #else
 /* With debugging this is just too big to inline */
-void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #endif
 
 /*
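Editorial note, not part of the patch: with radeon_ring_write() now taking a ring, the usual pattern is to lock a specific ring, write dwords, then commit. A minimal sketch with a hypothetical helper name:

/* Hypothetical: emit two raw dwords on the GFX ring. */
static int example_emit_two_dwords(struct radeon_device *rdev, u32 a, u32 b)
{
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;

        r = radeon_ring_lock(rdev, ring, 2);    /* reserve two dwords */
        if (r)
                return r;
        radeon_ring_write(ring, a);
        radeon_ring_write(ring, b);
        radeon_ring_unlock_commit(rdev, ring);  /* advance wptr and unlock */
        return 0;
}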
@@ -1437,18 +1650,19 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
 #define radeon_cs_parse(p) rdev->asic->cs_parse((p))
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
-#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
 #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
-#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
-#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
+#define radeon_ring_test(rdev, cp) (rdev)->asic->ring_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
 #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
 #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
-#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
 #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
 #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
 #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
@@ -1503,6 +1717,33 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 
 /*
+ * vm
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev);
+void radeon_vm_manager_fini(struct radeon_device *rdev);
+int radeon_vm_manager_start(struct radeon_device *rdev);
+int radeon_vm_manager_suspend(struct radeon_device *rdev);
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+                           struct radeon_vm *vm,
+                           struct radeon_bo *bo,
+                           struct ttm_mem_reg *mem);
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+                            struct radeon_bo *bo);
+int radeon_vm_bo_add(struct radeon_device *rdev,
+                    struct radeon_vm *vm,
+                    struct radeon_bo *bo,
+                    uint64_t offset,
+                    uint32_t flags);
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+                    struct radeon_vm *vm,
+                    struct radeon_bo *bo);
+
+
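Editorial note, not part of the patch: a rough sketch of the lifecycle implied by the VM declarations above; the helper name is hypothetical and the driver's actual ordering and locking may differ.

/* Hypothetical: map a BO into a per-file VM and back out on failure. */
static int example_vm_map(struct radeon_device *rdev, struct radeon_vm *vm,
                          struct radeon_bo *bo, struct ttm_mem_reg *mem,
                          uint64_t offset, uint32_t flags)
{
        int r;

        r = radeon_vm_bo_add(rdev, vm, bo, offset, flags);
        if (r)
                return r;
        r = radeon_vm_bo_update_pte(rdev, vm, bo, mem); /* fill page-table entries */
        if (r)
                radeon_vm_bo_rmv(rdev, vm, bo);
        return r;
}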
+/*
  * R600 vram scratch functions
  */
 int r600_vram_scratch_init(struct radeon_device *rdev);
index a2e1eae..36a6192 100644 (file)
@@ -138,14 +138,18 @@ static struct radeon_asic r100_asic = {
        .asic_reset = &r100_asic_reset,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r100_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r100_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
-       .fence_ring_emit = &r100_fence_ring_emit,
        .cs_parse = &r100_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = NULL,
@@ -186,14 +190,18 @@ static struct radeon_asic r200_asic = {
        .asic_reset = &r100_asic_reset,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r100_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r100_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
-       .fence_ring_emit = &r100_fence_ring_emit,
        .cs_parse = &r100_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -233,14 +241,18 @@ static struct radeon_asic r300_asic = {
        .asic_reset = &r300_asic_reset,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -281,14 +293,18 @@ static struct radeon_asic r300_asic_pcie = {
        .asic_reset = &r300_asic_reset,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -328,14 +344,18 @@ static struct radeon_asic r420_asic = {
        .asic_reset = &r300_asic_reset,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -376,14 +396,18 @@ static struct radeon_asic rs400_asic = {
        .asic_reset = &r300_asic_reset,
        .gart_tlb_flush = &rs400_gart_tlb_flush,
        .gart_set_page = &rs400_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -424,14 +448,18 @@ static struct radeon_asic rs600_asic = {
        .asic_reset = &rs600_asic_reset,
        .gart_tlb_flush = &rs600_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -472,14 +500,18 @@ static struct radeon_asic rs690_asic = {
        .asic_reset = &rs600_asic_reset,
        .gart_tlb_flush = &rs400_gart_tlb_flush,
        .gart_set_page = &rs400_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -520,14 +552,18 @@ static struct radeon_asic rv515_asic = {
        .asic_reset = &rs600_asic_reset,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &rv515_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -568,14 +604,18 @@ static struct radeon_asic r520_asic = {
        .asic_reset = &rs600_asic_reset,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_commit = &r100_cp_commit,
        .ring_start = &rv515_ring_start,
        .ring_test = &r100_ring_test,
-       .ring_ib_execute = &r100_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r100_ring_ib_execute,
+                       .emit_fence = &r300_fence_ring_emit,
+                       .emit_semaphore = &r100_semaphore_ring_emit,
+               }
+       },
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r200_copy_dma,
@@ -611,18 +651,22 @@ static struct radeon_asic r600_asic = {
        .fini = &r600_fini,
        .suspend = &r600_suspend,
        .resume = &r600_resume,
-       .cp_commit = &r600_cp_commit,
        .vga_set_state = &r600_vga_set_state,
        .gpu_is_lockup = &r600_gpu_is_lockup,
        .asic_reset = &r600_asic_reset,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &r600_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r600_ring_ib_execute,
+                       .emit_fence = &r600_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r600_irq_set,
        .irq_process = &r600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &r600_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -658,18 +702,22 @@ static struct radeon_asic rs780_asic = {
        .fini = &r600_fini,
        .suspend = &r600_suspend,
        .resume = &r600_resume,
-       .cp_commit = &r600_cp_commit,
        .gpu_is_lockup = &r600_gpu_is_lockup,
        .vga_set_state = &r600_vga_set_state,
        .asic_reset = &r600_asic_reset,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &r600_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r600_ring_ib_execute,
+                       .emit_fence = &r600_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r600_irq_set,
        .irq_process = &r600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &r600_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -705,18 +753,22 @@ static struct radeon_asic rv770_asic = {
        .fini = &rv770_fini,
        .suspend = &rv770_suspend,
        .resume = &rv770_resume,
-       .cp_commit = &r600_cp_commit,
        .asic_reset = &r600_asic_reset,
        .gpu_is_lockup = &r600_gpu_is_lockup,
        .vga_set_state = &r600_vga_set_state,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &r600_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &r600_ring_ib_execute,
+                       .emit_fence = &r600_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &r600_irq_set,
        .irq_process = &r600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &r600_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -752,18 +804,22 @@ static struct radeon_asic evergreen_asic = {
        .fini = &evergreen_fini,
        .suspend = &evergreen_suspend,
        .resume = &evergreen_resume,
-       .cp_commit = &r600_cp_commit,
        .gpu_is_lockup = &evergreen_gpu_is_lockup,
        .asic_reset = &evergreen_asic_reset,
        .vga_set_state = &r600_vga_set_state,
        .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &evergreen_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &evergreen_ring_ib_execute,
+                       .emit_fence = &r600_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &evergreen_irq_set,
        .irq_process = &evergreen_irq_process,
        .get_vblank_counter = &evergreen_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &evergreen_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -799,18 +855,22 @@ static struct radeon_asic sumo_asic = {
        .fini = &evergreen_fini,
        .suspend = &evergreen_suspend,
        .resume = &evergreen_resume,
-       .cp_commit = &r600_cp_commit,
        .gpu_is_lockup = &evergreen_gpu_is_lockup,
        .asic_reset = &evergreen_asic_reset,
        .vga_set_state = &r600_vga_set_state,
        .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &evergreen_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &evergreen_ring_ib_execute,
+                       .emit_fence = &r600_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &evergreen_irq_set,
        .irq_process = &evergreen_irq_process,
        .get_vblank_counter = &evergreen_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &evergreen_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -846,18 +906,22 @@ static struct radeon_asic btc_asic = {
        .fini = &evergreen_fini,
        .suspend = &evergreen_suspend,
        .resume = &evergreen_resume,
-       .cp_commit = &r600_cp_commit,
        .gpu_is_lockup = &evergreen_gpu_is_lockup,
        .asic_reset = &evergreen_asic_reset,
        .vga_set_state = &r600_vga_set_state,
        .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &evergreen_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &evergreen_ring_ib_execute,
+                       .emit_fence = &r600_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &evergreen_irq_set,
        .irq_process = &evergreen_irq_process,
        .get_vblank_counter = &evergreen_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &evergreen_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -888,23 +952,50 @@ static struct radeon_asic btc_asic = {
        .post_page_flip = &evergreen_post_page_flip,
 };
 
+static const struct radeon_vm_funcs cayman_vm_funcs = {
+       .init = &cayman_vm_init,
+       .fini = &cayman_vm_fini,
+       .bind = &cayman_vm_bind,
+       .unbind = &cayman_vm_unbind,
+       .tlb_flush = &cayman_vm_tlb_flush,
+       .page_flags = &cayman_vm_page_flags,
+       .set_page = &cayman_vm_set_page,
+};
+
 static struct radeon_asic cayman_asic = {
        .init = &cayman_init,
        .fini = &cayman_fini,
        .suspend = &cayman_suspend,
        .resume = &cayman_resume,
-       .cp_commit = &r600_cp_commit,
        .gpu_is_lockup = &cayman_gpu_is_lockup,
        .asic_reset = &cayman_asic_reset,
        .vga_set_state = &r600_vga_set_state,
        .gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .ring_test = &r600_ring_test,
-       .ring_ib_execute = &evergreen_ring_ib_execute,
+       .ring = {
+               [RADEON_RING_TYPE_GFX_INDEX] = {
+                       .ib_execute = &cayman_ring_ib_execute,
+                       .ib_parse = &evergreen_ib_parse,
+                       .emit_fence = &cayman_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               },
+               [CAYMAN_RING_TYPE_CP1_INDEX] = {
+                       .ib_execute = &cayman_ring_ib_execute,
+                       .ib_parse = &evergreen_ib_parse,
+                       .emit_fence = &cayman_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               },
+               [CAYMAN_RING_TYPE_CP2_INDEX] = {
+                       .ib_execute = &cayman_ring_ib_execute,
+                       .ib_parse = &evergreen_ib_parse,
+                       .emit_fence = &cayman_fence_ring_emit,
+                       .emit_semaphore = &r600_semaphore_ring_emit,
+               }
+       },
        .irq_set = &evergreen_irq_set,
        .irq_process = &evergreen_irq_process,
        .get_vblank_counter = &evergreen_get_vblank_counter,
-       .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &evergreen_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = NULL,
@@ -945,6 +1036,9 @@ int radeon_asic_init(struct radeon_device *rdev)
        else
                rdev->num_crtc = 2;
 
+       /* set the ring used for bo copies */
+       rdev->copy_ring = RADEON_RING_TYPE_GFX_INDEX;
+
        switch (rdev->family) {
        case CHIP_R100:
        case CHIP_RV100:
@@ -1050,6 +1144,7 @@ int radeon_asic_init(struct radeon_device *rdev)
                rdev->asic = &cayman_asic;
                /* set num crtcs */
                rdev->num_crtc = 6;
+               rdev->vm_manager.funcs = &cayman_vm_funcs;
                break;
        default:
                /* FIXME: not supported yet */
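
The hunks above replace the single ring_ib_execute/fence_ring_emit hooks with a .ring[] table indexed by ring id (GFX plus the two Cayman compute rings), so each ring carries its own ib_execute/ib_parse/emit_fence/emit_semaphore callbacks. A minimal sketch of dispatching through that table; the wrapper names below are illustrative, not the driver's real accessor macros:

/* Illustrative dispatch through the per-ring asic table shown above. */
static void example_ring_ib_execute(struct radeon_device *rdev, int ring,
                                    struct radeon_ib *ib)
{
        rdev->asic->ring[ring].ib_execute(rdev, ib);
}

static void example_ring_emit_fence(struct radeon_device *rdev,
                                    struct radeon_fence *fence)
{
        /* fences carry their ring index, see the fence changes further below */
        rdev->asic->ring[fence->ring].emit_fence(rdev, fence);
}
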
index 5991484..6304aef 100644 (file)
@@ -58,17 +58,20 @@ void r100_fini(struct radeon_device *rdev);
 int r100_suspend(struct radeon_device *rdev);
 int r100_resume(struct radeon_device *rdev);
 void r100_vga_set_state(struct radeon_device *rdev, bool state);
-bool r100_gpu_is_lockup(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-void r100_cp_commit(struct radeon_device *rdev);
 void r100_ring_start(struct radeon_device *rdev);
 int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
 void r100_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence);
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+                             struct radeon_ring *cp,
+                             struct radeon_semaphore *semaphore,
+                             bool emit_wait);
 int r100_cs_parse(struct radeon_cs_parser *p);
 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -83,7 +86,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
 void r100_bandwidth_update(struct radeon_device *rdev);
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r100_ring_test(struct radeon_device *rdev);
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 void r100_hpd_init(struct radeon_device *rdev);
 void r100_hpd_fini(struct radeon_device *rdev);
 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -101,12 +104,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev);
 int r100_debugfs_mc_info_init(struct radeon_device *rdev);
 int r100_gui_wait_for_idle(struct radeon_device *rdev);
 void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
-                           struct radeon_cp *cp);
+                           struct radeon_ring *cp);
 bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
                           struct r100_gpu_lockup *lockup,
-                          struct radeon_cp *cp);
+                          struct radeon_ring *cp);
 void r100_ib_fini(struct radeon_device *rdev);
-int r100_ib_init(struct radeon_device *rdev);
+int r100_ib_test(struct radeon_device *rdev);
 void r100_irq_disable(struct radeon_device *rdev);
 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
@@ -154,7 +157,7 @@ extern int r300_init(struct radeon_device *rdev);
 extern void r300_fini(struct radeon_device *rdev);
 extern int r300_suspend(struct radeon_device *rdev);
 extern int r300_resume(struct radeon_device *rdev);
-extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 extern int r300_asic_reset(struct radeon_device *rdev);
 extern void r300_ring_start(struct radeon_device *rdev);
 extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -293,22 +296,25 @@ int r600_resume(struct radeon_device *rdev);
 void r600_vga_set_state(struct radeon_device *rdev, bool state);
 int r600_wb_init(struct radeon_device *rdev);
 void r600_wb_fini(struct radeon_device *rdev);
-void r600_cp_commit(struct radeon_device *rdev);
 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
 uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
 void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int r600_cs_parse(struct radeon_cs_parser *p);
 void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence);
-bool r600_gpu_is_lockup(struct radeon_device *rdev);
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+                             struct radeon_ring *cp,
+                             struct radeon_semaphore *semaphore,
+                             bool emit_wait);
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_asic_reset(struct radeon_device *rdev);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size);
 void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
-int r600_ib_test(struct radeon_device *rdev);
+int r600_ib_test(struct radeon_device *rdev, int ring);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r600_ring_test(struct radeon_device *rdev);
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
                   unsigned num_gpu_pages, struct radeon_fence *fence);
@@ -328,7 +334,7 @@ extern int r600_get_pcie_lanes(struct radeon_device *rdev);
 bool r600_card_posted(struct radeon_device *rdev);
 void r600_cp_stop(struct radeon_device *rdev);
 int r600_cp_start(struct radeon_device *rdev);
-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
 int r600_cp_resume(struct radeon_device *rdev);
 void r600_cp_fini(struct radeon_device *rdev);
 int r600_count_pipe_bits(uint32_t val);
@@ -397,7 +403,7 @@ int evergreen_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 int evergreen_suspend(struct radeon_device *rdev);
 int evergreen_resume(struct radeon_device *rdev);
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int evergreen_asic_reset(struct radeon_device *rdev);
 void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -423,12 +429,26 @@ int evergreen_blit_init(struct radeon_device *rdev);
 /*
  * cayman
  */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+                           struct radeon_fence *fence);
 void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int cayman_init(struct radeon_device *rdev);
 void cayman_fini(struct radeon_device *rdev);
 int cayman_suspend(struct radeon_device *rdev);
 int cayman_resume(struct radeon_device *rdev);
-bool cayman_gpu_is_lockup(struct radeon_device *rdev);
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int cayman_asic_reset(struct radeon_device *rdev);
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int cayman_vm_init(struct radeon_device *rdev);
+void cayman_vm_fini(struct radeon_device *rdev);
+int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
+void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
+                             struct radeon_vm *vm,
+                             uint32_t flags);
+void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
+                       unsigned pfn, uint64_t addr, uint32_t flags);
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 
 #endif
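
The new cayman_vm_* prototypes above back the radeon_vm_funcs table that the asic setup code now hands to the VM manager. A rough, hedged sketch of the order in which those callbacks would be driven when mapping a single page; error handling is trimmed and example_vm_map_one_page is purely illustrative, not part of the driver:

/* Illustrative only: intended call order of the per-asic VM hooks. */
static int example_vm_map_one_page(struct radeon_device *rdev,
                                   struct radeon_vm *vm, int id,
                                   unsigned pfn, uint64_t addr, uint32_t bo_flags)
{
        const struct radeon_vm_funcs *funcs = rdev->vm_manager.funcs;
        uint32_t flags;
        int r;

        r = funcs->bind(rdev, vm, id);                 /* claim a hardware VM slot */
        if (r)
                return r;
        flags = funcs->page_flags(rdev, vm, bo_flags); /* translate bo flags to PTE bits */
        funcs->set_page(rdev, vm, pfn, addr, flags);   /* write the page table entry */
        funcs->tlb_flush(rdev, vm);                    /* make the mapping visible */
        return 0;
}
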
index 17e1a9b..815f234 100644 (file)
@@ -43,7 +43,7 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
 
        start_jiffies = jiffies;
        for (i = 0; i < n; i++) {
-               r = radeon_fence_create(rdev, &fence);
+               r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
                if (r)
                        return r;
 
@@ -229,21 +229,21 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number)
                break;
        case 6:
                /* GTT to VRAM, buffer size sweep, common modes */
-               for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+               for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
                        radeon_benchmark_move(rdev, common_modes[i],
                                              RADEON_GEM_DOMAIN_GTT,
                                              RADEON_GEM_DOMAIN_VRAM);
                break;
        case 7:
                /* VRAM to GTT, buffer size sweep, common modes */
-               for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+               for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
                        radeon_benchmark_move(rdev, common_modes[i],
                                              RADEON_GEM_DOMAIN_VRAM,
                                              RADEON_GEM_DOMAIN_GTT);
                break;
        case 8:
                /* VRAM to VRAM, buffer size sweep, common modes */
-               for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+               for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
                        radeon_benchmark_move(rdev, common_modes[i],
                                              RADEON_GEM_DOMAIN_VRAM,
                                              RADEON_GEM_DOMAIN_VRAM);
index 29afd71..435a3d9 100644 (file)
@@ -58,7 +58,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 
                duplicate = false;
                r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
-               for (j = 0; j < p->nrelocs; j++) {
+               for (j = 0; j < i; j++) {
                        if (r->handle == p->relocs[j].handle) {
                                p->relocs_ptr[i] = &p->relocs[j];
                                duplicate = true;
@@ -84,16 +84,75 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                        p->relocs[i].flags = r->flags;
                        radeon_bo_list_add_object(&p->relocs[i].lobj,
                                                  &p->validated);
-               }
+
+                       if (p->relocs[i].robj->tbo.sync_obj && !(r->flags & RADEON_RELOC_DONT_SYNC)) {
+                               struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
+                               if (!radeon_fence_signaled(fence)) {
+                                       p->sync_to_ring[fence->ring] = true;
+                               }
+                       }
+               } else
+                       p->relocs[i].handle = 0;
        }
        return radeon_bo_list_validate(&p->validated);
 }
 
+static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
+{
+       p->priority = priority;
+
+       switch (ring) {
+       default:
+               DRM_ERROR("unknown ring id: %d\n", ring);
+               return -EINVAL;
+       case RADEON_CS_RING_GFX:
+               p->ring = RADEON_RING_TYPE_GFX_INDEX;
+               break;
+       case RADEON_CS_RING_COMPUTE:
+               /* for now */
+               p->ring = RADEON_RING_TYPE_GFX_INDEX;
+               break;
+       }
+       return 0;
+}
+
+static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
+{
+       int i, r;
+
+       for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               /* no need to sync to our own or unused rings */
+               if (i == p->ring || !p->sync_to_ring[i] || !p->rdev->ring[i].ready)
+                       continue;
+
+               if (!p->ib->fence->semaphore) {
+                       r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
+                       if (r)
+                               return r;
+               }
+
+               r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
+               if (r)
+                       return r;
+               radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
+               radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
+
+               r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
+               if (r)
+                       return r;
+               radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
+               radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
+       }
+       return 0;
+}
+
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 {
        struct drm_radeon_cs *cs = data;
        uint64_t *chunk_array_ptr;
-       unsigned size, i, flags = 0;
+       unsigned size, i;
+       u32 ring = RADEON_CS_RING_GFX;
+       s32 priority = 0;
 
        if (!cs->num_chunks) {
                return 0;
@@ -103,6 +162,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
        p->idx = 0;
        p->chunk_ib_idx = -1;
        p->chunk_relocs_idx = -1;
+       p->chunk_flags_idx = -1;
        p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (p->chunks_array == NULL) {
                return -ENOMEM;
@@ -112,6 +172,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                               sizeof(uint64_t)*cs->num_chunks)) {
                return -EFAULT;
        }
+       p->cs_flags = 0;
        p->nchunks = cs->num_chunks;
        p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
        if (p->chunks == NULL) {
@@ -140,16 +201,19 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
-               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
-                   !p->chunks[i].length_dw) {
-                       return -EINVAL;
+               if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+                       p->chunk_flags_idx = i;
+                       /* zero length flags aren't useful */
+                       if (p->chunks[i].length_dw == 0)
+                               return -EINVAL;
                }
 
                p->chunks[i].length_dw = user_chunk.length_dw;
                p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
 
                cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
-               if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
+               if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
+                   (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
                        size = p->chunks[i].length_dw * sizeof(uint32_t);
                        p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
                        if (p->chunks[i].kdata == NULL) {
@@ -160,29 +224,58 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                                return -EFAULT;
                        }
                        if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
-                               flags = p->chunks[i].kdata[0];
+                               p->cs_flags = p->chunks[i].kdata[0];
+                               if (p->chunks[i].length_dw > 1)
+                                       ring = p->chunks[i].kdata[1];
+                               if (p->chunks[i].length_dw > 2)
+                                       priority = (s32)p->chunks[i].kdata[2];
                        }
-               } else {
-                       p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-                       p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-                       if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
-                               kfree(p->chunks[i].kpage[0]);
-                               kfree(p->chunks[i].kpage[1]);
-                               return -ENOMEM;
-                       }
-                       p->chunks[i].kpage_idx[0] = -1;
-                       p->chunks[i].kpage_idx[1] = -1;
-                       p->chunks[i].last_copied_page = -1;
-                       p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
                }
        }
-       if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
-               DRM_ERROR("cs IB too big: %d\n",
-                         p->chunks[p->chunk_ib_idx].length_dw);
+
+       if ((p->cs_flags & RADEON_CS_USE_VM) &&
+           !p->rdev->vm_manager.enabled) {
+               DRM_ERROR("VM not active on asic!\n");
+               if (p->chunk_relocs_idx != -1)
+                       kfree(p->chunks[p->chunk_relocs_idx].kdata);
+               if (p->chunk_flags_idx != -1)
+                       kfree(p->chunks[p->chunk_flags_idx].kdata);
                return -EINVAL;
        }
 
-       p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
+       if (radeon_cs_get_ring(p, ring, priority)) {
+               if (p->chunk_relocs_idx != -1)
+                       kfree(p->chunks[p->chunk_relocs_idx].kdata);
+               if (p->chunk_flags_idx != -1)
+                       kfree(p->chunks[p->chunk_flags_idx].kdata);
+               return -EINVAL;
+       }
+
+
+       /* deal with non-vm */
+       if ((p->chunk_ib_idx != -1) &&
+           ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
+           (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
+               if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+                       DRM_ERROR("cs IB too big: %d\n",
+                                 p->chunks[p->chunk_ib_idx].length_dw);
+                       return -EINVAL;
+               }
+               p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+                   p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+                       kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+                       kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+                       return -ENOMEM;
+               }
+               p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
+               p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
+               p->chunks[p->chunk_ib_idx].last_copied_page = -1;
+               p->chunks[p->chunk_ib_idx].last_page_index =
+                       ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
+       }
+
        return 0;
 }
 
@@ -224,11 +317,139 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
        radeon_ib_free(parser->rdev, &parser->ib);
 }
 
+static int radeon_cs_ib_chunk(struct radeon_device *rdev,
+                             struct radeon_cs_parser *parser)
+{
+       struct radeon_cs_chunk *ib_chunk;
+       int r;
+
+       if (parser->chunk_ib_idx == -1)
+               return 0;
+
+       if (parser->cs_flags & RADEON_CS_USE_VM)
+               return 0;
+
+       ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+       /* Copy the packet into the IB, the parser will read from the
+        * input memory (cached) and write to the IB (which can be
+        * uncached).
+        */
+       r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
+                          ib_chunk->length_dw * 4);
+       if (r) {
+               DRM_ERROR("Failed to get ib !\n");
+               return r;
+       }
+       parser->ib->length_dw = ib_chunk->length_dw;
+       r = radeon_cs_parse(parser);
+       if (r || parser->parser_error) {
+               DRM_ERROR("Invalid command stream !\n");
+               return r;
+       }
+       r = radeon_cs_finish_pages(parser);
+       if (r) {
+               DRM_ERROR("Invalid command stream !\n");
+               return r;
+       }
+       r = radeon_cs_sync_rings(parser);
+       if (r) {
+               DRM_ERROR("Failed to synchronize rings !\n");
+       }
+       parser->ib->vm_id = 0;
+       r = radeon_ib_schedule(rdev, parser->ib);
+       if (r) {
+               DRM_ERROR("Failed to schedule IB !\n");
+       }
+       return 0;
+}
+
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+                                  struct radeon_vm *vm)
+{
+       struct radeon_bo_list *lobj;
+       struct radeon_bo *bo;
+       int r;
+
+       list_for_each_entry(lobj, &parser->validated, tv.head) {
+               bo = lobj->bo;
+               r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+               if (r) {
+                       return r;
+               }
+       }
+       return 0;
+}
+
+static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
+                                struct radeon_cs_parser *parser)
+{
+       struct radeon_cs_chunk *ib_chunk;
+       struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+       struct radeon_vm *vm = &fpriv->vm;
+       int r;
+
+       if (parser->chunk_ib_idx == -1)
+               return 0;
+
+       if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
+               return 0;
+
+       ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+       if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+               DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+               return -EINVAL;
+       }
+       r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
+                          ib_chunk->length_dw * 4);
+       if (r) {
+               DRM_ERROR("Failed to get ib !\n");
+               return r;
+       }
+       parser->ib->length_dw = ib_chunk->length_dw;
+       /* Copy the packet into the IB */
+       if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+                              ib_chunk->length_dw * 4)) {
+               return -EFAULT;
+       }
+       r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+       if (r) {
+               return r;
+       }
+
+       mutex_lock(&vm->mutex);
+       r = radeon_vm_bind(rdev, vm);
+       if (r) {
+               goto out;
+       }
+       r = radeon_bo_vm_update_pte(parser, vm);
+       if (r) {
+               goto out;
+       }
+       r = radeon_cs_sync_rings(parser);
+       if (r) {
+               DRM_ERROR("Failed to synchronize rings !\n");
+       }
+       parser->ib->vm_id = vm->id;
+       /* ib pool is bound at 0 in virtual address space so gpu_addr is the
+        * offset inside the pool bo
+        */
+       parser->ib->gpu_addr = parser->ib->sa_bo.offset;
+       r = radeon_ib_schedule(rdev, parser->ib);
+out:
+       if (!r) {
+               if (vm->fence) {
+                       radeon_fence_unref(&vm->fence);
+               }
+               vm->fence = radeon_fence_ref(parser->ib->fence);
+       }
+       mutex_unlock(&vm->mutex);
+       return r;
+}
+
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_cs_parser parser;
-       struct radeon_cs_chunk *ib_chunk;
        int r;
 
        radeon_mutex_lock(&rdev->cs_mutex);
@@ -245,13 +466,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
-       r =  radeon_ib_get(rdev, &parser.ib);
-       if (r) {
-               DRM_ERROR("Failed to get ib !\n");
-               radeon_cs_parser_fini(&parser, r);
-               radeon_mutex_unlock(&rdev->cs_mutex);
-               return r;
-       }
        r = radeon_cs_parser_relocs(&parser);
        if (r) {
                if (r != -ERESTARTSYS)
@@ -260,29 +474,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
-       /* Copy the packet into the IB, the parser will read from the
-        * input memory (cached) and write to the IB (which can be
-        * uncached). */
-       ib_chunk = &parser.chunks[parser.chunk_ib_idx];
-       parser.ib->length_dw = ib_chunk->length_dw;
-       r = radeon_cs_parse(&parser);
-       if (r || parser.parser_error) {
-               DRM_ERROR("Invalid command stream !\n");
-               radeon_cs_parser_fini(&parser, r);
-               radeon_mutex_unlock(&rdev->cs_mutex);
-               return r;
-       }
-       r = radeon_cs_finish_pages(&parser);
+       r = radeon_cs_ib_chunk(rdev, &parser);
        if (r) {
-               DRM_ERROR("Invalid command stream !\n");
-               radeon_cs_parser_fini(&parser, r);
-               radeon_mutex_unlock(&rdev->cs_mutex);
-               return r;
+               goto out;
        }
-       r = radeon_ib_schedule(rdev, parser.ib);
+       r = radeon_cs_ib_vm_chunk(rdev, &parser);
        if (r) {
-               DRM_ERROR("Failed to schedule IB !\n");
+               goto out;
        }
+out:
        radeon_cs_parser_fini(&parser, r);
        radeon_mutex_unlock(&rdev->cs_mutex);
        return r;
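
With the parser changes above, the RADEON_CHUNK_ID_FLAGS chunk may carry up to three dwords: kdata[0] is the flag mask (e.g. RADEON_CS_USE_VM or RADEON_CS_KEEP_TILING_FLAGS), kdata[1] the ring selector mapped by radeon_cs_get_ring(), and kdata[2] a signed priority. A hedged userspace-side sketch of such a chunk; the ioctl plumbing and the IB/relocation chunks are omitted:

/* Sketch of the flags chunk consumed by radeon_cs_parser_init(). */
uint32_t cs_flags[3];
struct drm_radeon_cs_chunk flags_chunk;

cs_flags[0] = RADEON_CS_USE_VM;       /* submit through the per-process VM */
cs_flags[1] = RADEON_CS_RING_GFX;     /* ring id; compute currently aliases GFX */
cs_flags[2] = 0;                      /* priority, stored as parser->priority */

flags_chunk.chunk_id   = RADEON_CHUNK_ID_FLAGS;
flags_chunk.length_dw  = 3;
flags_chunk.chunk_data = (uint64_t)(uintptr_t)cs_flags;
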
index c4d00a1..0afb13b 100644 (file)
@@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
        if (radeon_no_wb == 1)
                rdev->wb.enabled = false;
        else {
-               /* often unreliable on AGP */
                if (rdev->flags & RADEON_IS_AGP) {
+                       /* often unreliable on AGP */
+                       rdev->wb.enabled = false;
+               } else if (rdev->family < CHIP_R300) {
+                       /* often unreliable on pre-r300 */
                        rdev->wb.enabled = false;
                } else {
                        rdev->wb.enabled = true;
@@ -718,17 +721,24 @@ int radeon_device_init(struct radeon_device *rdev,
         * can recall function without having locking issues */
        radeon_mutex_init(&rdev->cs_mutex);
        mutex_init(&rdev->ib_pool.mutex);
-       mutex_init(&rdev->cp.mutex);
+       for (i = 0; i < RADEON_NUM_RINGS; ++i)
+               mutex_init(&rdev->ring[i].mutex);
        mutex_init(&rdev->dc_hw_i2c_mutex);
        if (rdev->family >= CHIP_R600)
                spin_lock_init(&rdev->ih.lock);
        mutex_init(&rdev->gem.mutex);
        mutex_init(&rdev->pm.mutex);
        mutex_init(&rdev->vram_mutex);
-       rwlock_init(&rdev->fence_drv.lock);
+       rwlock_init(&rdev->fence_lock);
+       rwlock_init(&rdev->semaphore_drv.lock);
        INIT_LIST_HEAD(&rdev->gem.objects);
        init_waitqueue_head(&rdev->irq.vblank_queue);
        init_waitqueue_head(&rdev->irq.idle_queue);
+       INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
+       /* initialize vm here */
+       rdev->vm_manager.use_bitmap = 1;
+       rdev->vm_manager.max_pfn = 1 << 20;
+       INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
 
        /* Set asic functions */
        r = radeon_asic_init(rdev);
@@ -765,8 +775,14 @@ int radeon_device_init(struct radeon_device *rdev,
        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
        if (r) {
                rdev->need_dma32 = true;
+               dma_bits = 32;
                printk(KERN_WARNING "radeon: No suitable DMA available.\n");
        }
+       r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+       if (r) {
+               pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+               printk(KERN_WARNING "radeon: No coherent DMA available.\n");
+       }
 
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
@@ -814,15 +830,20 @@ int radeon_device_init(struct radeon_device *rdev,
                if (r)
                        return r;
        }
-       if (radeon_testing) {
+       if ((radeon_testing & 1)) {
                radeon_test_moves(rdev);
        }
+       if ((radeon_testing & 2)) {
+               radeon_test_syncing(rdev);
+       }
        if (radeon_benchmarking) {
                radeon_benchmark(rdev, radeon_benchmarking);
        }
        return 0;
 }
 
+static void radeon_debugfs_remove_files(struct radeon_device *rdev);
+
 void radeon_device_fini(struct radeon_device *rdev)
 {
        DRM_INFO("radeon: finishing device.\n");
@@ -837,6 +858,7 @@ void radeon_device_fini(struct radeon_device *rdev)
        rdev->rio_mem = NULL;
        iounmap(rdev->rmmio);
        rdev->rmmio = NULL;
+       radeon_debugfs_remove_files(rdev);
 }
 
 
@@ -848,7 +870,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
        struct radeon_device *rdev;
        struct drm_crtc *crtc;
        struct drm_connector *connector;
-       int r;
+       int i, r;
 
        if (dev == NULL || dev->dev_private == NULL) {
                return -ENODEV;
@@ -887,7 +909,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
        /* evict vram memory */
        radeon_bo_evict_vram(rdev);
        /* wait for gpu to finish processing current batch */
-       radeon_fence_wait_last(rdev);
+       for (i = 0; i < RADEON_NUM_RINGS; i++)
+               radeon_fence_wait_last(rdev, i);
 
        radeon_save_bios_scratch_regs(rdev);
 
@@ -986,36 +1009,29 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 /*
  * Debugfs
  */
-struct radeon_debugfs {
-       struct drm_info_list    *files;
-       unsigned                num_files;
-};
-static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
-static unsigned _radeon_debugfs_count = 0;
-
 int radeon_debugfs_add_files(struct radeon_device *rdev,
                             struct drm_info_list *files,
                             unsigned nfiles)
 {
        unsigned i;
 
-       for (i = 0; i < _radeon_debugfs_count; i++) {
-               if (_radeon_debugfs[i].files == files) {
+       for (i = 0; i < rdev->debugfs_count; i++) {
+               if (rdev->debugfs[i].files == files) {
                        /* Already registered */
                        return 0;
                }
        }
 
-       i = _radeon_debugfs_count + 1;
+       i = rdev->debugfs_count + 1;
        if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
                DRM_ERROR("Reached maximum number of debugfs components.\n");
                DRM_ERROR("Report so we increase "
                          "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
                return -EINVAL;
        }
-       _radeon_debugfs[_radeon_debugfs_count].files = files;
-       _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
-       _radeon_debugfs_count = i;
+       rdev->debugfs[rdev->debugfs_count].files = files;
+       rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
+       rdev->debugfs_count = i;
 #if defined(CONFIG_DEBUG_FS)
        drm_debugfs_create_files(files, nfiles,
                                 rdev->ddev->control->debugfs_root,
@@ -1027,6 +1043,22 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
        return 0;
 }
 
+static void radeon_debugfs_remove_files(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       unsigned i;
+
+       for (i = 0; i < rdev->debugfs_count; i++) {
+               drm_debugfs_remove_files(rdev->debugfs[i].files,
+                                        rdev->debugfs[i].num_files,
+                                        rdev->ddev->control);
+               drm_debugfs_remove_files(rdev->debugfs[i].files,
+                                        rdev->debugfs[i].num_files,
+                                        rdev->ddev->primary);
+       }
+#endif
+}
+
 #if defined(CONFIG_DEBUG_FS)
 int radeon_debugfs_init(struct drm_minor *minor)
 {
@@ -1035,11 +1067,5 @@ int radeon_debugfs_init(struct drm_minor *minor)
 
 void radeon_debugfs_cleanup(struct drm_minor *minor)
 {
-       unsigned i;
-
-       for (i = 0; i < _radeon_debugfs_count; i++) {
-               drm_debugfs_remove_files(_radeon_debugfs[i].files,
-                                        _radeon_debugfs[i].num_files, minor);
-       }
 }
 #endif
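
Debugfs bookkeeping above moves from file-scope statics into rdev->debugfs[], and device teardown now drops everything through radeon_debugfs_remove_files(). A minimal, hedged example of a component registering its files against the device; the file list and its show callback are placeholders:

/* Hypothetical component file list; radeon_debugfs_add_files() records it in
 * rdev->debugfs[] so it can be removed again at device teardown. */
static struct drm_info_list example_info_list[] = {
        { "example_component_info", example_info_show, 0, NULL },
};

static int example_component_debugfs_init(struct radeon_device *rdev)
{
        return radeon_debugfs_add_files(rdev, example_info_list,
                                        ARRAY_SIZE(example_info_list));
}
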
index a22d6e6..d3ffc18 100644 (file)
@@ -406,7 +406,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
        if (!ASIC_IS_AVIVO(rdev)) {
                /* crtc offset is from display base addr not FB location */
                base -= radeon_crtc->legacy_display_base_addr;
-               pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
+               pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
 
                if (tiling_flags & RADEON_TILING_MACRO) {
                        if (ASIC_IS_R300(rdev)) {
@@ -1081,7 +1081,7 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
 void
 radeon_framebuffer_init(struct drm_device *dev,
                        struct radeon_framebuffer *rfb,
-                       struct drm_mode_fb_cmd *mode_cmd,
+                       struct drm_mode_fb_cmd2 *mode_cmd,
                        struct drm_gem_object *obj)
 {
        rfb->obj = obj;
@@ -1092,15 +1092,15 @@ radeon_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 radeon_user_framebuffer_create(struct drm_device *dev,
                               struct drm_file *file_priv,
-                              struct drm_mode_fb_cmd *mode_cmd)
+                              struct drm_mode_fb_cmd2 *mode_cmd)
 {
        struct drm_gem_object *obj;
        struct radeon_framebuffer *radeon_fb;
 
-       obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+       obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
        if (obj ==  NULL) {
                dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
-                       "can't create framebuffer\n", mode_cmd->handle);
+                       "can't create framebuffer\n", mode_cmd->handles[0]);
                return ERR_PTR(-ENOENT);
        }
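
The display code above now takes struct drm_mode_fb_cmd2, which describes a framebuffer by a fourcc pixel_format plus per-plane handles[]/pitches[]/offsets[] arrays instead of a single handle/bpp/depth/pitch. A rough sketch of filling one for a single-plane 32 bpp buffer; gem_handle and the geometry values are placeholders:

/* Sketch only: single-plane framebuffer description in the new format. */
struct drm_mode_fb_cmd2 mode_cmd = {
        .width        = 1024,                               /* placeholder geometry */
        .height       = 768,
        .pixel_format = drm_mode_legacy_fb_format(32, 24),  /* bpp, depth */
        .handles[0]   = gem_handle,                         /* placeholder GEM handle */
        .pitches[0]   = 1024 * 4,                           /* bytes per scanline, plane 0 */
};
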
 
index 71499fc..31da622 100644 (file)
  *   2.10.0 - fusion 2D tiling
  *   2.11.0 - backend map, initial compute support for the CS checker
  *   2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
+ *   2.13.0 - virtual memory support
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       12
+#define KMS_DRIVER_MINOR       13
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -84,6 +85,10 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 int radeon_gem_object_init(struct drm_gem_object *obj);
 void radeon_gem_object_free(struct drm_gem_object *obj);
+int radeon_gem_object_open(struct drm_gem_object *obj,
+                               struct drm_file *file_priv);
+void radeon_gem_object_close(struct drm_gem_object *obj,
+                               struct drm_file *file_priv);
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
                                      int *vpos, int *hpos);
 extern struct drm_ioctl_desc radeon_ioctls_kms[];
@@ -206,6 +211,21 @@ static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
+static const struct file_operations radeon_driver_old_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = radeon_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver_old = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -232,21 +252,7 @@ static struct drm_driver driver_old = {
        .reclaim_buffers = drm_core_reclaim_buffers,
        .ioctls = radeon_ioctls,
        .dma_ioctl = radeon_cp_buffers,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = drm_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .read = drm_read,
-#ifdef CONFIG_COMPAT
-                .compat_ioctl = radeon_compat_ioctl,
-#endif
-                .llseek = noop_llseek,
-       },
-
+       .fops = &radeon_driver_old_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
@@ -304,6 +310,20 @@ radeon_pci_resume(struct pci_dev *pdev)
        return radeon_resume_kms(dev);
 }
 
+static const struct file_operations radeon_driver_kms_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = radeon_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = radeon_kms_compat_ioctl,
+#endif
+};
+
 static struct drm_driver kms_driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -335,24 +355,13 @@ static struct drm_driver kms_driver = {
        .ioctls = radeon_ioctls_kms,
        .gem_init_object = radeon_gem_object_init,
        .gem_free_object = radeon_gem_object_free,
+       .gem_open_object = radeon_gem_object_open,
+       .gem_close_object = radeon_gem_object_close,
        .dma_ioctl = radeon_dma_ioctl_kms,
        .dumb_create = radeon_mode_dumb_create,
        .dumb_map_offset = radeon_mode_dumb_mmap,
        .dumb_destroy = radeon_mode_dumb_destroy,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = radeon_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .read = drm_read,
-#ifdef CONFIG_COMPAT
-                .compat_ioctl = radeon_kms_compat_ioctl,
-#endif
-       },
-
+       .fops = &radeon_driver_kms_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index 0b7b486..cf2bf35 100644 (file)
@@ -103,7 +103,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
 }
 
 static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
-                                        struct drm_mode_fb_cmd *mode_cmd,
+                                        struct drm_mode_fb_cmd2 *mode_cmd,
                                         struct drm_gem_object **gobj_p)
 {
        struct radeon_device *rdev = rfbdev->rdev;
@@ -114,13 +114,17 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
        int ret;
        int aligned_size, size;
        int height = mode_cmd->height;
+       u32 bpp, depth;
+
+       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
        /* need to align pitch with crtc limits */
-       mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
+       mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
+                                                 fb_tiled) * ((bpp + 1) / 8);
 
        if (rdev->family >= CHIP_R600)
                height = ALIGN(mode_cmd->height, 8);
-       size = mode_cmd->pitch * height;
+       size = mode_cmd->pitches[0] * height;
        aligned_size = ALIGN(size, PAGE_SIZE);
        ret = radeon_gem_object_create(rdev, aligned_size, 0,
                                       RADEON_GEM_DOMAIN_VRAM,
@@ -137,7 +141,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
                tiling_flags = RADEON_TILING_MACRO;
 
 #ifdef __BIG_ENDIAN
-       switch (mode_cmd->bpp) {
+       switch (bpp) {
        case 32:
                tiling_flags |= RADEON_TILING_SWAP_32BIT;
                break;
@@ -151,7 +155,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
        if (tiling_flags) {
                ret = radeon_bo_set_tiling_flags(rbo,
                                                 tiling_flags | RADEON_TILING_SURFACE,
-                                                mode_cmd->pitch);
+                                                mode_cmd->pitches[0]);
                if (ret)
                        dev_err(rdev->dev, "FB failed to set tiling flags\n");
        }
@@ -187,7 +191,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
        struct radeon_device *rdev = rfbdev->rdev;
        struct fb_info *info;
        struct drm_framebuffer *fb = NULL;
-       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd;
        struct drm_gem_object *gobj = NULL;
        struct radeon_bo *rbo = NULL;
        struct device *device = &rdev->pdev->dev;
@@ -201,8 +205,8 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
        if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
                sizes->surface_bpp = 32;
 
-       mode_cmd.bpp = sizes->surface_bpp;
-       mode_cmd.depth = sizes->surface_depth;
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                         sizes->surface_depth);
 
        ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
        rbo = gem_to_radeon_bo(gobj);
@@ -228,7 +232,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
 
        strcpy(info->fix.id, "radeondrmfb");
 
-       drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 
        info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
        info->fbops = &radeonfb_ops;
@@ -271,7 +275,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
        DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)rdev->mc.aper_base);
        DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
        DRM_INFO("fb depth is %d\n", fb->depth);
-       DRM_INFO("   pitch is %d\n", fb->pitch);
+       DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
        vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
        return 0;
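
The fbdev path above derives bpp and depth from the fourcc instead of reading them out of the mode command, then converts the aligned pixel pitch into a byte pitch. A small hedged sketch of that derivation using the same helpers as the hunk above; fb_tiled is the module parameter and the width comes from the mode command:

u32 bpp, depth, pitch_bytes;

drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
/* align the pitch in pixels to the crtc limits, then convert to bytes */
pitch_bytes = radeon_align_pitch(rdev, mode_cmd->width, bpp, fb_tiled)
              * ((bpp + 1) / 8);
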
index 76ec0e9..64ea3dd 100644 (file)
 #include "radeon.h"
 #include "radeon_trace.h"
 
-static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
        if (rdev->wb.enabled) {
-               u32 scratch_index;
-               if (rdev->wb.use_event)
-                       scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-               else
-                       scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-               rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
-       } else
-               WREG32(rdev->fence_drv.scratch_reg, seq);
+               *rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
+       } else {
+               WREG32(rdev->fence_drv[ring].scratch_reg, seq);
+       }
 }
 
-static u32 radeon_fence_read(struct radeon_device *rdev)
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
 {
-       u32 seq;
+       u32 seq = 0;
 
        if (rdev->wb.enabled) {
-               u32 scratch_index;
-               if (rdev->wb.use_event)
-                       scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-               else
-                       scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-               seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
-       } else
-               seq = RREG32(rdev->fence_drv.scratch_reg);
+               seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
+       } else {
+               seq = RREG32(rdev->fence_drv[ring].scratch_reg);
+       }
        return seq;
 }
 
@@ -73,28 +65,28 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 {
        unsigned long irq_flags;
 
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       if (fence->emited) {
-               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       if (fence->emitted) {
+               write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
                return 0;
        }
-       fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
-       if (!rdev->cp.ready)
+       fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
+       if (!rdev->ring[fence->ring].ready)
                /* FIXME: cp is not running assume everything is done right
                 * away
                 */
-               radeon_fence_write(rdev, fence->seq);
+               radeon_fence_write(rdev, fence->seq, fence->ring);
        else
-               radeon_fence_ring_emit(rdev, fence);
+               radeon_fence_ring_emit(rdev, fence->ring, fence);
 
        trace_radeon_fence_emit(rdev->ddev, fence->seq);
-       fence->emited = true;
-       list_move_tail(&fence->list, &rdev->fence_drv.emited);
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       fence->emitted = true;
+       list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
        return 0;
 }
 
-static bool radeon_fence_poll_locked(struct radeon_device *rdev)
+static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
 {
        struct radeon_fence *fence;
        struct list_head *i, *n;
@@ -102,34 +94,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
        bool wake = false;
        unsigned long cjiffies;
 
-       seq = radeon_fence_read(rdev);
-       if (seq != rdev->fence_drv.last_seq) {
-               rdev->fence_drv.last_seq = seq;
-               rdev->fence_drv.last_jiffies = jiffies;
-               rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+       seq = radeon_fence_read(rdev, ring);
+       if (seq != rdev->fence_drv[ring].last_seq) {
+               rdev->fence_drv[ring].last_seq = seq;
+               rdev->fence_drv[ring].last_jiffies = jiffies;
+               rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
        } else {
                cjiffies = jiffies;
-               if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
-                       cjiffies -= rdev->fence_drv.last_jiffies;
-                       if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+               if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
+                       cjiffies -= rdev->fence_drv[ring].last_jiffies;
+                       if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
                                /* update the timeout */
-                               rdev->fence_drv.last_timeout -= cjiffies;
+                               rdev->fence_drv[ring].last_timeout -= cjiffies;
                        } else {
                                /* the 500ms timeout has elapsed, we should test
                                 * for GPU lockup
                                 */
-                               rdev->fence_drv.last_timeout = 1;
+                               rdev->fence_drv[ring].last_timeout = 1;
                        }
                } else {
                        /* wrap around, update last jiffies, we will just wait
                         * a little longer
                         */
-                       rdev->fence_drv.last_jiffies = cjiffies;
+                       rdev->fence_drv[ring].last_jiffies = cjiffies;
                }
                return false;
        }
        n = NULL;
-       list_for_each(i, &rdev->fence_drv.emited) {
+       list_for_each(i, &rdev->fence_drv[ring].emitted) {
                fence = list_entry(i, struct radeon_fence, list);
                if (fence->seq == seq) {
                        n = i;
@@ -141,11 +133,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
                i = n;
                do {
                        n = i->prev;
-                       list_move_tail(i, &rdev->fence_drv.signaled);
+                       list_move_tail(i, &rdev->fence_drv[ring].signaled);
                        fence = list_entry(i, struct radeon_fence, list);
                        fence->signaled = true;
                        i = n;
-               } while (i != &rdev->fence_drv.emited);
+               } while (i != &rdev->fence_drv[ring].emitted);
                wake = true;
        }
        return wake;
@@ -157,14 +149,18 @@ static void radeon_fence_destroy(struct kref *kref)
         struct radeon_fence *fence;
 
        fence = container_of(kref, struct radeon_fence, kref);
-       write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
        list_del(&fence->list);
-       fence->emited = false;
-       write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+       fence->emitted = false;
+       write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
+       if (fence->semaphore)
+               radeon_semaphore_free(fence->rdev, fence->semaphore);
        kfree(fence);
 }
 
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
+int radeon_fence_create(struct radeon_device *rdev,
+                       struct radeon_fence **fence,
+                       int ring)
 {
        unsigned long irq_flags;
 
@@ -174,18 +170,19 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
        }
        kref_init(&((*fence)->kref));
        (*fence)->rdev = rdev;
-       (*fence)->emited = false;
+       (*fence)->emitted = false;
        (*fence)->signaled = false;
        (*fence)->seq = 0;
+       (*fence)->ring = ring;
+       (*fence)->semaphore = NULL;
        INIT_LIST_HEAD(&(*fence)->list);
 
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
        return 0;
 }
 
-
 bool radeon_fence_signaled(struct radeon_fence *fence)
 {
        unsigned long irq_flags;
@@ -197,21 +194,21 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
        if (fence->rdev->gpu_lockup)
                return true;
 
-       write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
        signaled = fence->signaled;
        /* if we are shutting down, report all fences as signaled */
        if (fence->rdev->shutdown) {
                signaled = true;
        }
-       if (!fence->emited) {
-               WARN(1, "Querying an unemited fence : %p !\n", fence);
+       if (!fence->emitted) {
+               WARN(1, "Querying an unemitted fence : %p !\n", fence);
                signaled = true;
        }
        if (!signaled) {
-               radeon_fence_poll_locked(fence->rdev);
+               radeon_fence_poll_locked(fence->rdev, fence->ring);
                signaled = fence->signaled;
        }
-       write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+       write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
        return signaled;
 }
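
Fences are now bound to a ring at creation time and tracked in the per-ring fence_drv[] state, as the conversion above and below shows. A hedged sketch of the resulting lifecycle for work submitted on the GFX ring; error paths are trimmed, and the emit step normally happens inside the submission helpers rather than open-coded like this:

/* Sketch of the per-ring fence API after this change. */
static int example_fence_roundtrip(struct radeon_device *rdev)
{
        struct radeon_fence *fence = NULL;
        int r;

        r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
        if (r)
                return r;
        /* ... emit work on the GFX ring here ... */
        r = radeon_fence_emit(rdev, fence);
        if (r)
                goto out;
        r = radeon_fence_wait(fence, false);    /* uninterruptible wait */
out:
        radeon_fence_unref(&fence);
        return r;
}
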
 
@@ -230,24 +227,24 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
        if (radeon_fence_signaled(fence)) {
                return 0;
        }
-       timeout = rdev->fence_drv.last_timeout;
+       timeout = rdev->fence_drv[fence->ring].last_timeout;
 retry:
        /* save current sequence used to check for GPU lockup */
-       seq = rdev->fence_drv.last_seq;
+       seq = rdev->fence_drv[fence->ring].last_seq;
        trace_radeon_fence_wait_begin(rdev->ddev, seq);
        if (intr) {
-               radeon_irq_kms_sw_irq_get(rdev);
-               r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
+               radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+               r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
                                radeon_fence_signaled(fence), timeout);
-               radeon_irq_kms_sw_irq_put(rdev);
+               radeon_irq_kms_sw_irq_put(rdev, fence->ring);
                if (unlikely(r < 0)) {
                        return r;
                }
        } else {
-               radeon_irq_kms_sw_irq_get(rdev);
-               r = wait_event_timeout(rdev->fence_drv.queue,
+               radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+               r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
                         radeon_fence_signaled(fence), timeout);
-               radeon_irq_kms_sw_irq_put(rdev);
+               radeon_irq_kms_sw_irq_put(rdev, fence->ring);
        }
        trace_radeon_fence_wait_end(rdev->ddev, seq);
        if (unlikely(!radeon_fence_signaled(fence))) {
@@ -258,10 +255,11 @@ retry:
                        timeout = r;
                        goto retry;
                }
-               /* don't protect read access to rdev->fence_drv.last_seq
+               /* don't protect read access to rdev->fence_drv[t].last_seq
                 * if we experiencing a lockup the value doesn't change
                 */
-               if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+               if (seq == rdev->fence_drv[fence->ring].last_seq &&
+                   radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
                        /* good news we believe it's a lockup */
                        printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
                             fence->seq, seq);
@@ -272,20 +270,20 @@ retry:
                        r = radeon_gpu_reset(rdev);
                        if (r)
                                return r;
-                       radeon_fence_write(rdev, fence->seq);
+                       radeon_fence_write(rdev, fence->seq, fence->ring);
                        rdev->gpu_lockup = false;
                }
                timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-               write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-               rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-               rdev->fence_drv.last_jiffies = jiffies;
-               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+               write_lock_irqsave(&rdev->fence_lock, irq_flags);
+               rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+               rdev->fence_drv[fence->ring].last_jiffies = jiffies;
+               write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
                goto retry;
        }
        return 0;
 }
 
-int radeon_fence_wait_next(struct radeon_device *rdev)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
        unsigned long irq_flags;
        struct radeon_fence *fence;
@@ -294,21 +292,21 @@ int radeon_fence_wait_next(struct radeon_device *rdev)
        if (rdev->gpu_lockup) {
                return 0;
        }
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       if (list_empty(&rdev->fence_drv.emited)) {
-               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       if (list_empty(&rdev->fence_drv[ring].emitted)) {
+               write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
                return 0;
        }
-       fence = list_entry(rdev->fence_drv.emited.next,
+       fence = list_entry(rdev->fence_drv[ring].emitted.next,
                           struct radeon_fence, list);
        radeon_fence_ref(fence);
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
        r = radeon_fence_wait(fence, false);
        radeon_fence_unref(&fence);
        return r;
 }
 
-int radeon_fence_wait_last(struct radeon_device *rdev)
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
 {
        unsigned long irq_flags;
        struct radeon_fence *fence;
@@ -317,15 +315,15 @@ int radeon_fence_wait_last(struct radeon_device *rdev)
        if (rdev->gpu_lockup) {
                return 0;
        }
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       if (list_empty(&rdev->fence_drv.emited)) {
-               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       if (list_empty(&rdev->fence_drv[ring].emitted)) {
+               write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
                return 0;
        }
-       fence = list_entry(rdev->fence_drv.emited.prev,
+       fence = list_entry(rdev->fence_drv[ring].emitted.prev,
                           struct radeon_fence, list);
        radeon_fence_ref(fence);
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
        r = radeon_fence_wait(fence, false);
        radeon_fence_unref(&fence);
        return r;
@@ -347,39 +345,95 @@ void radeon_fence_unref(struct radeon_fence **fence)
        }
 }
 
-void radeon_fence_process(struct radeon_device *rdev)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
 {
        unsigned long irq_flags;
        bool wake;
 
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       wake = radeon_fence_poll_locked(rdev);
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       wake = radeon_fence_poll_locked(rdev, ring);
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
        if (wake) {
-               wake_up_all(&rdev->fence_drv.queue);
+               wake_up_all(&rdev->fence_drv[ring].queue);
        }
 }
 
-int radeon_fence_driver_init(struct radeon_device *rdev)
+int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+{
+       unsigned long irq_flags;
+       int not_processed = 0;
+
+       read_lock_irqsave(&rdev->fence_lock, irq_flags);
+       if (!rdev->fence_drv[ring].initialized)
+               return 0;
+
+       if (!list_empty(&rdev->fence_drv[ring].emitted)) {
+               struct list_head *ptr;
+               list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
+                       /* count up to 3, that's enough info */
+                       if (++not_processed >= 3)
+                               break;
+               }
+       }
+       read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+       return not_processed;
+}
+
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
 {
        unsigned long irq_flags;
+       uint64_t index;
        int r;
 
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
-       if (r) {
-               dev_err(rdev->dev, "fence failed to get scratch register\n");
-               write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-               return r;
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+       if (rdev->wb.use_event) {
+               rdev->fence_drv[ring].scratch_reg = 0;
+               index = R600_WB_EVENT_OFFSET + ring * 4;
+       } else {
+               r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+               if (r) {
+                       dev_err(rdev->dev, "fence failed to get scratch register\n");
+                       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+                       return r;
+               }
+               index = RADEON_WB_SCRATCH_OFFSET +
+                       rdev->fence_drv[ring].scratch_reg -
+                       rdev->scratch.reg_base;
        }
-       radeon_fence_write(rdev, 0);
-       atomic_set(&rdev->fence_drv.seq, 0);
-       INIT_LIST_HEAD(&rdev->fence_drv.created);
-       INIT_LIST_HEAD(&rdev->fence_drv.emited);
-       INIT_LIST_HEAD(&rdev->fence_drv.signaled);
-       init_waitqueue_head(&rdev->fence_drv.queue);
-       rdev->fence_drv.initialized = true;
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+       rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+       rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
+       radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
+       rdev->fence_drv[ring].initialized = true;
+       DRM_INFO("fence driver on ring %d uses gpu addr 0x%08Lx and cpu addr 0x%p\n",
+                ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+       return 0;
+}
+
+static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
+{
+       rdev->fence_drv[ring].scratch_reg = -1;
+       rdev->fence_drv[ring].cpu_addr = NULL;
+       rdev->fence_drv[ring].gpu_addr = 0;
+       atomic_set(&rdev->fence_drv[ring].seq, 0);
+       INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
+       INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
+       INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
+       init_waitqueue_head(&rdev->fence_drv[ring].queue);
+       rdev->fence_drv[ring].initialized = false;
+}
+
+int radeon_fence_driver_init(struct radeon_device *rdev)
+{
+       unsigned long irq_flags;
+       int ring;
+
+       write_lock_irqsave(&rdev->fence_lock, irq_flags);
+       for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+               radeon_fence_driver_init_ring(rdev, ring);
+       }
+       write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
        if (radeon_debugfs_fence_init(rdev)) {
                dev_err(rdev->dev, "fence debugfs file creation failed\n");
        }
@@ -389,14 +443,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 void radeon_fence_driver_fini(struct radeon_device *rdev)
 {
        unsigned long irq_flags;
-
-       if (!rdev->fence_drv.initialized)
-               return;
-       wake_up_all(&rdev->fence_drv.queue);
-       write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-       radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
-       write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-       rdev->fence_drv.initialized = false;
+       int ring;
+
+       for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+               if (!rdev->fence_drv[ring].initialized)
+                       continue;
+               radeon_fence_wait_last(rdev, ring);
+               wake_up_all(&rdev->fence_drv[ring].queue);
+               write_lock_irqsave(&rdev->fence_lock, irq_flags);
+               radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+               write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+               rdev->fence_drv[ring].initialized = false;
+       }
 }
 
 
@@ -410,14 +468,21 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fence *fence;
-
-       seq_printf(m, "Last signaled fence 0x%08X\n",
-                  radeon_fence_read(rdev));
-       if (!list_empty(&rdev->fence_drv.emited)) {
-                  fence = list_entry(rdev->fence_drv.emited.prev,
-                                     struct radeon_fence, list);
-                  seq_printf(m, "Last emited fence %p with 0x%08X\n",
-                             fence,  fence->seq);
+       int i;
+
+       for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               if (!rdev->fence_drv[i].initialized)
+                       continue;
+
+               seq_printf(m, "--- ring %d ---\n", i);
+               seq_printf(m, "Last signaled fence 0x%08X\n",
+                          radeon_fence_read(rdev, i));
+               if (!list_empty(&rdev->fence_drv[i].emitted)) {
+                       fence = list_entry(rdev->fence_drv[i].emitted.prev,
+                                          struct radeon_fence, list);
+                       seq_printf(m, "Last emitted fence %p with 0x%08X\n",
+                                  fence,  fence->seq);
+               }
        }
        return 0;
 }
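
The fence rework above replaces the single global fence driver with one radeon_fence_driver per ring, all serialized by rdev->fence_lock, so every fence now carries the ring it was emitted on. A minimal sketch of the resulting calling sequence, modelled on the radeon_pm.c hunk further down; example_wait_ring_idle is a hypothetical helper name and error handling is omitted:

    /* sketch: emit a fence on one ring and block until it signals */
    static void example_wait_ring_idle(struct radeon_device *rdev, struct radeon_ring *ring)
    {
            struct radeon_fence *fence;

            radeon_ring_alloc(rdev, ring, 64);                 /* room for the fence packet */
            radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
            radeon_fence_emit(rdev, fence);
            radeon_ring_commit(rdev, ring);
            radeon_fence_wait(fence, false);                   /* waits on fence_drv[ring].queue */
            radeon_fence_unref(&fence);
    }
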
index ba7ab79..010dad8 100644 (file)
@@ -157,9 +157,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
        for (i = 0; i < pages; i++, p++) {
                if (rdev->gart.pages[p]) {
-                       if (!rdev->gart.ttm_alloced[p])
-                               pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                        rdev->gart.pages[p] = NULL;
                        rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
                        page_base = rdev->gart.pages_addr[p];
@@ -191,23 +188,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
        for (i = 0; i < pages; i++, p++) {
-               /* we reverted the patch using dma_addr in TTM for now but this
-                * code stops building on alpha so just comment it out for now */
-               if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
-                       rdev->gart.ttm_alloced[p] = true;
-                       rdev->gart.pages_addr[p] = dma_addr[i];
-               } else {
-                       /* we need to support large memory configurations */
-                       /* assume that unbind have already been call on the range */
-                       rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
-                                                       0, PAGE_SIZE,
-                                                       PCI_DMA_BIDIRECTIONAL);
-                       if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
-                               /* FIXME: failed to map page (return -ENOMEM?) */
-                               radeon_gart_unbind(rdev, offset, pages);
-                               return -ENOMEM;
-                       }
-               }
+               rdev->gart.pages_addr[p] = dma_addr[i];
                rdev->gart.pages[p] = pagelist[i];
                if (rdev->gart.ptr) {
                        page_base = rdev->gart.pages_addr[p];
@@ -274,12 +255,6 @@ int radeon_gart_init(struct radeon_device *rdev)
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
-       rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
-                                        rdev->gart.num_cpu_pages, GFP_KERNEL);
-       if (rdev->gart.ttm_alloced == NULL) {
-               radeon_gart_fini(rdev);
-               return -ENOMEM;
-       }
        /* set GART entry to point to the dummy page by default */
        for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
                rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -296,10 +271,404 @@ void radeon_gart_fini(struct radeon_device *rdev)
        rdev->gart.ready = false;
        kfree(rdev->gart.pages);
        kfree(rdev->gart.pages_addr);
-       kfree(rdev->gart.ttm_alloced);
        rdev->gart.pages = NULL;
        rdev->gart.pages_addr = NULL;
-       rdev->gart.ttm_alloced = NULL;
 
        radeon_dummy_page_fini(rdev);
 }
+
+/*
+ * vm helpers
+ *
+ * TODO bind a default page at vm initialization for default address
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+       int r;
+
+       rdev->vm_manager.enabled = false;
+
+       /* mark first vm as always in use, it's the system one */
+       r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
+                                     rdev->vm_manager.max_pfn * 8,
+                                     RADEON_GEM_DOMAIN_VRAM);
+       if (r) {
+               dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
+                       (rdev->vm_manager.max_pfn * 8) >> 10);
+               return r;
+       }
+
+       r = rdev->vm_manager.funcs->init(rdev);
+       if (r == 0)
+               rdev->vm_manager.enabled = true;
+
+       return r;
+}
+
+/* cs mutex must be locked */
+static void radeon_vm_unbind_locked(struct radeon_device *rdev,
+                                   struct radeon_vm *vm)
+{
+       struct radeon_bo_va *bo_va;
+
+       if (vm->id == -1) {
+               return;
+       }
+
+       /* wait for vm use to end */
+       if (vm->fence) {
+               radeon_fence_wait(vm->fence, false);
+               radeon_fence_unref(&vm->fence);
+       }
+
+       /* hw unbind */
+       rdev->vm_manager.funcs->unbind(rdev, vm);
+       rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
+       list_del_init(&vm->list);
+       vm->id = -1;
+       radeon_sa_bo_free(rdev, &vm->sa_bo);
+       vm->pt = NULL;
+
+       list_for_each_entry(bo_va, &vm->va, vm_list) {
+               bo_va->valid = false;
+       }
+}
+
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+       if (rdev->vm_manager.sa_manager.bo == NULL)
+               return;
+       radeon_vm_manager_suspend(rdev);
+       rdev->vm_manager.funcs->fini(rdev);
+       radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
+       rdev->vm_manager.enabled = false;
+}
+
+int radeon_vm_manager_start(struct radeon_device *rdev)
+{
+       if (rdev->vm_manager.sa_manager.bo == NULL) {
+               return -EINVAL;
+       }
+       return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
+}
+
+int radeon_vm_manager_suspend(struct radeon_device *rdev)
+{
+       struct radeon_vm *vm, *tmp;
+
+       radeon_mutex_lock(&rdev->cs_mutex);
+       /* unbind all active vm */
+       list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
+               radeon_vm_unbind_locked(rdev, vm);
+       }
+       rdev->vm_manager.funcs->fini(rdev);
+       radeon_mutex_unlock(&rdev->cs_mutex);
+       return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+}
+
+/* cs mutex must be locked */
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       mutex_lock(&vm->mutex);
+       radeon_vm_unbind_locked(rdev, vm);
+       mutex_unlock(&vm->mutex);
+}
+
+/* cs mutex and vm mutex must be locked */
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       struct radeon_vm *vm_evict;
+       unsigned i;
+       int id = -1, r;
+
+       if (vm == NULL) {
+               return -EINVAL;
+       }
+
+       if (vm->id != -1) {
+               /* update lru */
+               list_del_init(&vm->list);
+               list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+               return 0;
+       }
+
+retry:
+       r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
+                            RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
+                            RADEON_GPU_PAGE_SIZE);
+       if (r) {
+               if (list_empty(&rdev->vm_manager.lru_vm)) {
+                       return r;
+               }
+               vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
+               radeon_vm_unbind(rdev, vm_evict);
+               goto retry;
+       }
+       vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
+       vm->pt += (vm->sa_bo.offset >> 3);
+       vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
+       vm->pt_gpu_addr += vm->sa_bo.offset;
+       memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
+
+retry_id:
+       /* search for free vm */
+       for (i = 0; i < rdev->vm_manager.nvm; i++) {
+               if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
+                       id = i;
+                       break;
+               }
+       }
+       /* evict vm if necessary */
+       if (id == -1) {
+               vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
+               radeon_vm_unbind(rdev, vm_evict);
+               goto retry_id;
+       }
+
+       /* do hw bind */
+       r = rdev->vm_manager.funcs->bind(rdev, vm, id);
+       if (r) {
+               radeon_sa_bo_free(rdev, &vm->sa_bo);
+               return r;
+       }
+       rdev->vm_manager.use_bitmap |= 1 << id;
+       vm->id = id;
+       list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+       return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
+                                      &rdev->ib_pool.sa_manager.bo->tbo.mem);
+}
+
+/* object has to be reserved */
+int radeon_vm_bo_add(struct radeon_device *rdev,
+                    struct radeon_vm *vm,
+                    struct radeon_bo *bo,
+                    uint64_t offset,
+                    uint32_t flags)
+{
+       struct radeon_bo_va *bo_va, *tmp;
+       struct list_head *head;
+       uint64_t size = radeon_bo_size(bo), last_offset = 0;
+       unsigned last_pfn;
+
+       bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+       if (bo_va == NULL) {
+               return -ENOMEM;
+       }
+       bo_va->vm = vm;
+       bo_va->bo = bo;
+       bo_va->soffset = offset;
+       bo_va->eoffset = offset + size;
+       bo_va->flags = flags;
+       bo_va->valid = false;
+       INIT_LIST_HEAD(&bo_va->bo_list);
+       INIT_LIST_HEAD(&bo_va->vm_list);
+       /* make sure the object fits at this offset */
+       if (bo_va->soffset >= bo_va->eoffset) {
+               kfree(bo_va);
+               return -EINVAL;
+       }
+
+       last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
+       if (last_pfn > rdev->vm_manager.max_pfn) {
+               kfree(bo_va);
+               dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+                       last_pfn, rdev->vm_manager.max_pfn);
+               return -EINVAL;
+       }
+
+       mutex_lock(&vm->mutex);
+       if (last_pfn > vm->last_pfn) {
+               /* grow va space 32M by 32M */
+               unsigned align = ((32 << 20) >> 12) - 1;
+               radeon_mutex_lock(&rdev->cs_mutex);
+               radeon_vm_unbind_locked(rdev, vm);
+               radeon_mutex_unlock(&rdev->cs_mutex);
+               vm->last_pfn = (last_pfn + align) & ~align;
+       }
+       head = &vm->va;
+       last_offset = 0;
+       list_for_each_entry(tmp, &vm->va, vm_list) {
+               if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
+                       /* bo can be added before this one */
+                       break;
+               }
+               if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
+                       /* bo and tmp overlap, invalid offset */
+                       dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+                               bo, (unsigned)bo_va->soffset, tmp->bo,
+                               (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+                       kfree(bo_va);
+                       mutex_unlock(&vm->mutex);
+                       return -EINVAL;
+               }
+               last_offset = tmp->eoffset;
+               head = &tmp->vm_list;
+       }
+       list_add(&bo_va->vm_list, head);
+       list_add_tail(&bo_va->bo_list, &bo->va);
+       mutex_unlock(&vm->mutex);
+       return 0;
+}
+
+static u64 radeon_vm_get_addr(struct radeon_device *rdev,
+                             struct ttm_mem_reg *mem,
+                             unsigned pfn)
+{
+       u64 addr = 0;
+
+       switch (mem->mem_type) {
+       case TTM_PL_VRAM:
+               addr = (mem->start << PAGE_SHIFT);
+               addr += pfn * RADEON_GPU_PAGE_SIZE;
+               addr += rdev->vm_manager.vram_base_offset;
+               break;
+       case TTM_PL_TT:
+               /* offset inside page table */
+               addr = mem->start << PAGE_SHIFT;
+               addr += pfn * RADEON_GPU_PAGE_SIZE;
+               addr = addr >> PAGE_SHIFT;
+               /* page table offset */
+               addr = rdev->gart.pages_addr[addr];
+               /* in case cpu page size != gpu page size */
+               addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
+               break;
+       default:
+               break;
+       }
+       return addr;
+}
+
+/* object has to be reserved; cs mutex and vm mutex must be held */
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+                           struct radeon_vm *vm,
+                           struct radeon_bo *bo,
+                           struct ttm_mem_reg *mem)
+{
+       struct radeon_bo_va *bo_va;
+       unsigned ngpu_pages, i;
+       uint64_t addr = 0, pfn;
+       uint32_t flags;
+
+       /* nothing to do if vm isn't bound */
+       if (vm->id == -1)
+               return 0;
+
+       bo_va = radeon_bo_va(bo, vm);
+       if (bo_va == NULL) {
+               dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+               return -EINVAL;
+       }
+
+       if (bo_va->valid)
+               return 0;
+
+       ngpu_pages = radeon_bo_ngpu_pages(bo);
+       bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+       bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+       if (mem) {
+               if (mem->mem_type != TTM_PL_SYSTEM) {
+                       bo_va->flags |= RADEON_VM_PAGE_VALID;
+                       bo_va->valid = true;
+               }
+               if (mem->mem_type == TTM_PL_TT) {
+                       bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+               }
+       }
+       pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
+       flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
+       for (i = 0, addr = 0; i < ngpu_pages; i++) {
+               if (mem && bo_va->valid) {
+                       addr = radeon_vm_get_addr(rdev, mem, i);
+               }
+               rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
+       }
+       rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
+       return 0;
+}
+
+/* object has to be reserved */
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+                    struct radeon_vm *vm,
+                    struct radeon_bo *bo)
+{
+       struct radeon_bo_va *bo_va;
+
+       bo_va = radeon_bo_va(bo, vm);
+       if (bo_va == NULL)
+               return 0;
+
+       list_del(&bo_va->bo_list);
+       mutex_lock(&vm->mutex);
+       radeon_mutex_lock(&rdev->cs_mutex);
+       radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
+       radeon_mutex_unlock(&rdev->cs_mutex);
+       list_del(&bo_va->vm_list);
+       mutex_unlock(&vm->mutex);
+
+       kfree(bo_va);
+       return 0;
+}
+
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+                            struct radeon_bo *bo)
+{
+       struct radeon_bo_va *bo_va;
+
+       BUG_ON(!atomic_read(&bo->tbo.reserved));
+       list_for_each_entry(bo_va, &bo->va, bo_list) {
+               bo_va->valid = false;
+       }
+}
+
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       int r;
+
+       vm->id = -1;
+       vm->fence = NULL;
+       mutex_init(&vm->mutex);
+       INIT_LIST_HEAD(&vm->list);
+       INIT_LIST_HEAD(&vm->va);
+       vm->last_pfn = 0;
+       /* map the ib pool buffer at 0 in the virtual address space and
+        * set it read only
+        */
+       r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+                            RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
+       return r;
+}
+
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+       struct radeon_bo_va *bo_va, *tmp;
+       int r;
+
+       mutex_lock(&vm->mutex);
+
+       radeon_mutex_lock(&rdev->cs_mutex);
+       radeon_vm_unbind_locked(rdev, vm);
+       radeon_mutex_unlock(&rdev->cs_mutex);
+
+       /* remove all bo */
+       r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+       if (!r) {
+               bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+               list_del_init(&bo_va->bo_list);
+               list_del_init(&bo_va->vm_list);
+               radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+               kfree(bo_va);
+       }
+       if (!list_empty(&vm->va)) {
+               dev_err(rdev->dev, "still active bo inside vm\n");
+       }
+       list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+               list_del_init(&bo_va->vm_list);
+               r = radeon_bo_reserve(bo_va->bo, false);
+               if (!r) {
+                       list_del_init(&bo_va->bo_list);
+                       radeon_bo_unreserve(bo_va->bo);
+                       kfree(bo_va);
+               }
+       }
+       mutex_unlock(&vm->mutex);
+}
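
The virtual memory helpers added above follow a lazy model: radeon_vm_bo_add() only records a radeon_bo_va range, the hardware page table is filled by radeon_vm_bo_update_pte() once radeon_vm_bind() has given the vm an id, and eviction simply unbinds the least recently used vm. A rough sketch of mapping one buffer into a per-file vm, using only functions from this patch; example_map_bo, the offset and the flag choice are illustrative:

    /* sketch: record a va mapping for a reserved bo (page tables are filled lazily) */
    static int example_map_bo(struct radeon_device *rdev, struct radeon_vm *vm,
                              struct radeon_bo *bo, uint64_t va_offset)
    {
            int r;

            r = radeon_bo_reserve(bo, false);   /* radeon_vm_bo_add() expects a reserved bo */
            if (r)
                    return r;
            r = radeon_vm_bo_add(rdev, vm, bo, va_offset,
                                 RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
            radeon_bo_unreserve(bo);
            return r;
    }
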
index aa1ca2d..7337850 100644 (file)
@@ -142,6 +142,44 @@ void radeon_gem_fini(struct radeon_device *rdev)
        radeon_bo_force_delete(rdev);
 }
 
+/*
+ * Called from drm_gem_handle_create, which appears in both the new and open
+ * ioctl cases.
+ */
+int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+       return 0;
+}
+
+void radeon_gem_object_close(struct drm_gem_object *obj,
+                            struct drm_file *file_priv)
+{
+       struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+       struct radeon_device *rdev = rbo->rdev;
+       struct radeon_fpriv *fpriv = file_priv->driver_priv;
+       struct radeon_vm *vm = &fpriv->vm;
+       struct radeon_bo_va *bo_va, *tmp;
+
+       if (rdev->family < CHIP_CAYMAN) {
+               return;
+       }
+
+       if (radeon_bo_reserve(rbo, false)) {
+               return;
+       }
+       list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
+               if (bo_va->vm == vm) {
+                       /* remove from this vm address space */
+                       mutex_lock(&vm->mutex);
+                       list_del(&bo_va->vm_list);
+                       mutex_unlock(&vm->mutex);
+                       list_del(&bo_va->bo_list);
+                       kfree(bo_va);
+               }
+       }
+       radeon_bo_unreserve(rbo);
+}
+
 
 /*
  * GEM ioctls.
@@ -152,6 +190,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_mem_type_manager *man;
+       unsigned i;
 
        man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
@@ -160,8 +199,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
        if (rdev->stollen_vga_memory)
                args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
        args->vram_visible -= radeon_fbdev_total_size(rdev);
-       args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
-               RADEON_IB_POOL_SIZE*64*1024;
+       args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
+       for(i = 0; i < RADEON_NUM_RINGS; ++i)
+               args->gart_size -= rdev->ring[i].ring_size;
        return 0;
 }
 
@@ -352,6 +392,109 @@ out:
        return r;
 }
 
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *filp)
+{
+       struct drm_radeon_gem_va *args = data;
+       struct drm_gem_object *gobj;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_fpriv *fpriv = filp->driver_priv;
+       struct radeon_bo *rbo;
+       struct radeon_bo_va *bo_va;
+       u32 invalid_flags;
+       int r = 0;
+
+       if (!rdev->vm_manager.enabled) {
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -ENOTTY;
+       }
+
+       /* !! DON'T REMOVE !!
+        * We don't support vm_id yet. To be sure we don't end up with broken
+        * userspace, reject anyone trying to use a non-zero value; that way we
+        * can start using the field later without breaking existing userspace.
+        */
+       if (args->vm_id) {
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -EINVAL;
+       }
+
+       if (args->offset < RADEON_VA_RESERVED_SIZE) {
+               dev_err(&dev->pdev->dev,
+                       "offset 0x%lX is in reserved area 0x%X\n",
+                       (unsigned long)args->offset,
+                       RADEON_VA_RESERVED_SIZE);
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -EINVAL;
+       }
+
+       /* don't remove, we need to require userspace to set the snooped flag,
+        * otherwise we will end up with broken userspace and we won't be able
+        * to enable this feature without adding a new interface
+        */
+       invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
+       if ((args->flags & invalid_flags)) {
+               dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+                       args->flags, invalid_flags);
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -EINVAL;
+       }
+       if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
+               dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -EINVAL;
+       }
+
+       switch (args->operation) {
+       case RADEON_VA_MAP:
+       case RADEON_VA_UNMAP:
+               break;
+       default:
+               dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+                       args->operation);
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -EINVAL;
+       }
+
+       gobj = drm_gem_object_lookup(dev, filp, args->handle);
+       if (gobj == NULL) {
+               args->operation = RADEON_VA_RESULT_ERROR;
+               return -ENOENT;
+       }
+       rbo = gem_to_radeon_bo(gobj);
+       r = radeon_bo_reserve(rbo, false);
+       if (r) {
+               args->operation = RADEON_VA_RESULT_ERROR;
+               drm_gem_object_unreference_unlocked(gobj);
+               return r;
+       }
+       switch (args->operation) {
+       case RADEON_VA_MAP:
+               bo_va = radeon_bo_va(rbo, &fpriv->vm);
+               if (bo_va) {
+                       args->operation = RADEON_VA_RESULT_VA_EXIST;
+                       args->offset = bo_va->soffset;
+                       goto out;
+               }
+               r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
+                                    args->offset, args->flags);
+               break;
+       case RADEON_VA_UNMAP:
+               r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
+               break;
+       default:
+               break;
+       }
+       args->operation = RADEON_VA_RESULT_OK;
+       if (r) {
+               args->operation = RADEON_VA_RESULT_ERROR;
+       }
+out:
+       radeon_bo_unreserve(rbo);
+       drm_gem_object_unreference_unlocked(gobj);
+       return r;
+}
+
 int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
index 8f86aeb..be38921 100644 (file)
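
radeon_gem_va_ioctl() above is deliberately strict: vm_id must be 0, the offset must lie above RADEON_VA_RESERVED_SIZE, RADEON_VM_PAGE_VALID and RADEON_VM_PAGE_SYSTEM are rejected, and RADEON_VM_PAGE_SNOOPED is mandatory for now. A hedged userspace sketch of a map request; the DRM_RADEON_GEM_VA command index and the drmCommandWriteRead() wrapper are assumed from libdrm conventions and are not shown in this patch, and fd, bo_handle and virtual_address are placeholder variables:

    struct drm_radeon_gem_va va = {
            .handle    = bo_handle,
            .operation = RADEON_VA_MAP,
            .vm_id     = 0,                     /* non-zero values are rejected */
            .flags     = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED,
            .offset    = virtual_address,       /* must be >= RADEON_VA_RESERVED_SIZE */
    };
    int r = drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
    if (r == 0 && va.operation == RADEON_VA_RESULT_VA_EXIST)
            virtual_address = va.offset;        /* the bo was already mapped at this address */
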
@@ -65,7 +65,8 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
        unsigned i;
 
        /* Disable *all* interrupts */
-       rdev->irq.sw_int = false;
+       for (i = 0; i < RADEON_NUM_RINGS; i++)
+               rdev->irq.sw_int[i] = false;
        rdev->irq.gui_idle = false;
        for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
                rdev->irq.hpd[i] = false;
@@ -81,9 +82,11 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
 {
        struct radeon_device *rdev = dev->dev_private;
+       unsigned i;
 
        dev->max_vblank_count = 0x001fffff;
-       rdev->irq.sw_int = true;
+       for (i = 0; i < RADEON_NUM_RINGS; i++)
+               rdev->irq.sw_int[i] = true;
        radeon_irq_set(rdev);
        return 0;
 }
@@ -97,7 +100,8 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
                return;
        }
        /* Disable *all* interrupts */
-       rdev->irq.sw_int = false;
+       for (i = 0; i < RADEON_NUM_RINGS; i++)
+               rdev->irq.sw_int[i] = false;
        rdev->irq.gui_idle = false;
        for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
                rdev->irq.hpd[i] = false;
@@ -194,26 +198,26 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
        flush_work_sync(&rdev->hotplug_work);
 }
 
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
 {
        unsigned long irqflags;
 
        spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
-       if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
-               rdev->irq.sw_int = true;
+       if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
+               rdev->irq.sw_int[ring] = true;
                radeon_irq_set(rdev);
        }
        spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
 }
 
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
 {
        unsigned long irqflags;
 
        spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
-       BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
-       if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
-               rdev->irq.sw_int = false;
+       BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
+       if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
+               rdev->irq.sw_int[ring] = false;
                radeon_irq_set(rdev);
        }
        spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
index be2c122..d335288 100644 (file)
@@ -250,6 +250,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        return -EINVAL;
                }
                break;
+       case RADEON_INFO_VA_START:
+               /* this is where we report if vm is supported or not */
+               if (rdev->family < CHIP_CAYMAN)
+                       return -EINVAL;
+               value = RADEON_VA_RESERVED_SIZE;
+               break;
+       case RADEON_INFO_IB_VM_MAX_SIZE:
+               /* this is where we report if vm is supported or not */
+               if (rdev->family < CHIP_CAYMAN)
+                       return -EINVAL;
+               value = RADEON_IB_VM_MAX_SIZE;
+               break;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
@@ -270,7 +282,6 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
        return 0;
 }
 
-
 void radeon_driver_lastclose_kms(struct drm_device *dev)
 {
        vga_switcheroo_process_delayed_switch();
@@ -278,12 +289,45 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
 
 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 {
+       struct radeon_device *rdev = dev->dev_private;
+
+       file_priv->driver_priv = NULL;
+
+       /* newer GPUs have virtual address space support */
+       if (rdev->family >= CHIP_CAYMAN) {
+               struct radeon_fpriv *fpriv;
+               int r;
+
+               fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+               if (unlikely(!fpriv)) {
+                       return -ENOMEM;
+               }
+
+               r = radeon_vm_init(rdev, &fpriv->vm);
+               if (r) {
+                       radeon_vm_fini(rdev, &fpriv->vm);
+                       kfree(fpriv);
+                       return r;
+               }
+
+               file_priv->driver_priv = fpriv;
+       }
        return 0;
 }
 
 void radeon_driver_postclose_kms(struct drm_device *dev,
                                 struct drm_file *file_priv)
 {
+       struct radeon_device *rdev = dev->dev_private;
+
+       /* newer GPUs have virtual address space support */
+       if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
+               struct radeon_fpriv *fpriv = file_priv->driver_priv;
+
+               radeon_vm_fini(rdev, &fpriv->vm);
+               kfree(fpriv);
+               file_priv->driver_priv = NULL;
+       }
 }
 
 void radeon_driver_preclose_kms(struct drm_device *dev,
@@ -451,5 +495,6 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
        DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
 };
 int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
index daadf21..25a19c4 100644 (file)
@@ -437,7 +437,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 
        crtc_offset_cntl = 0;
 
-       pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+       pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
        crtc_pitch  = (((pitch_pixels * target_fb->bits_per_pixel) +
                        ((target_fb->bits_per_pixel * 8) - 1)) /
                       (target_fb->bits_per_pixel * 8));
index 2c2e75e..08ff857 100644 (file)
@@ -643,7 +643,7 @@ extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green
                                     u16 *blue, int regno);
 void radeon_framebuffer_init(struct drm_device *dev,
                             struct radeon_framebuffer *rfb,
-                            struct drm_mode_fb_cmd *mode_cmd,
+                            struct drm_mode_fb_cmd2 *mode_cmd,
                             struct drm_gem_object *obj);
 
 int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
index 1c85152..d45df17 100644 (file)
@@ -46,6 +46,20 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
  * function are calling it.
  */
 
+void radeon_bo_clear_va(struct radeon_bo *bo)
+{
+       struct radeon_bo_va *bo_va, *tmp;
+
+       list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
+               /* remove from all vm address spaces */
+               mutex_lock(&bo_va->vm->mutex);
+               list_del(&bo_va->vm_list);
+               mutex_unlock(&bo_va->vm->mutex);
+               list_del(&bo_va->bo_list);
+               kfree(bo_va);
+       }
+}
+
 static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
        struct radeon_bo *bo;
@@ -55,6 +69,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
+       radeon_bo_clear_va(bo);
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
 }
@@ -95,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev,
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        unsigned long max_size = 0;
+       size_t acc_size;
        int r;
 
        size = ALIGN(size, PAGE_SIZE);
@@ -117,6 +133,9 @@ int radeon_bo_create(struct radeon_device *rdev,
                return -ENOMEM;
        }
 
+       acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
+                                      sizeof(struct radeon_bo));
+
 retry:
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
@@ -130,12 +149,13 @@ retry:
        bo->gem_base.driver_private = NULL;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
+       INIT_LIST_HEAD(&bo->va);
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
        mutex_lock(&rdev->vram_mutex);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
-                       &bo->placement, page_align, 0, !kernel, NULL, size,
-                       &radeon_ttm_bo_destroy);
+                       &bo->placement, page_align, 0, !kernel, NULL,
+                       acc_size, &radeon_ttm_bo_destroy);
        mutex_unlock(&rdev->vram_mutex);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS) {
@@ -483,6 +503,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                return;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
+       radeon_vm_bo_invalidate(rbo->rdev, rbo);
 }
 
 int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -556,3 +577,16 @@ int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
        }
        return 0;
 }
+
+/* object has to be reserved */
+struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
+{
+       struct radeon_bo_va *bo_va;
+
+       list_for_each_entry(bo_va, &rbo->va, bo_list) {
+               if (bo_va->vm == vm) {
+                       return bo_va;
+               }
+       }
+       return NULL;
+}
index b07f0f9..cde4303 100644 (file)
@@ -83,6 +83,16 @@ static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
        return !!atomic_read(&bo->tbo.reserved);
 }
 
+static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
+{
+       return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
+static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
+{
+       return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
 /**
  * radeon_bo_mmap_offset - return mmap offset of bo
  * @bo:        radeon object for which we query the offset
@@ -128,4 +138,26 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                                        struct ttm_mem_reg *mem);
 extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
+                                        struct radeon_vm *vm);
+
+/*
+ * sub allocation
+ */
+extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+                                    struct radeon_sa_manager *sa_manager,
+                                    unsigned size, u32 domain);
+extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+                                     struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+                                     struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+                                       struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_new(struct radeon_device *rdev,
+                           struct radeon_sa_manager *sa_manager,
+                           struct radeon_sa_bo *sa_bo,
+                           unsigned size, unsigned align);
+extern void radeon_sa_bo_free(struct radeon_device *rdev,
+                             struct radeon_sa_bo *sa_bo);
+
 #endif
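
The radeon_sa_* prototypes above describe a simple sub-allocator: one backing buffer object per manager, with radeon_sa_bo_new() handing out offset/size slices that the IB pool and the vm manager use instead of allocating their own BOs. A minimal lifecycle sketch based only on these prototypes; example_sa_usage and the sizes are illustrative:

    /* sketch: init a sub-allocation manager, carve one slice out of it, tear down */
    static int example_sa_usage(struct radeon_device *rdev)
    {
            struct radeon_sa_manager mgr;
            struct radeon_sa_bo sa_bo;
            int r;

            r = radeon_sa_bo_manager_init(rdev, &mgr, 64 * 1024, RADEON_GEM_DOMAIN_GTT);
            if (r)
                    return r;
            r = radeon_sa_bo_manager_start(rdev, &mgr);
            if (!r) {
                    r = radeon_sa_bo_new(rdev, &mgr, &sa_bo, 4096, 256);
                    if (!r)
                            radeon_sa_bo_free(rdev, &sa_bo);
                    radeon_sa_bo_manager_suspend(rdev, &mgr);
            }
            radeon_sa_bo_manager_fini(rdev, &mgr);
            return r;
    }
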
index 78a665b..095148e 100644 (file)
@@ -252,7 +252,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 
        mutex_lock(&rdev->ddev->struct_mutex);
        mutex_lock(&rdev->vram_mutex);
-       mutex_lock(&rdev->cp.mutex);
+       for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               if (rdev->ring[i].ring_obj)
+                       mutex_lock(&rdev->ring[i].mutex);
+       }
 
        /* gui idle int has issues on older chips it seems */
        if (rdev->family >= CHIP_R600) {
@@ -268,12 +271,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
                        radeon_irq_set(rdev);
                }
        } else {
-               if (rdev->cp.ready) {
+               struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+               if (ring->ready) {
                        struct radeon_fence *fence;
-                       radeon_ring_alloc(rdev, 64);
-                       radeon_fence_create(rdev, &fence);
+                       radeon_ring_alloc(rdev, ring, 64);
+                       radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
                        radeon_fence_emit(rdev, fence);
-                       radeon_ring_commit(rdev);
+                       radeon_ring_commit(rdev, ring);
                        radeon_fence_wait(fence, false);
                        radeon_fence_unref(&fence);
                }
@@ -307,7 +311,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 
        rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 
-       mutex_unlock(&rdev->cp.mutex);
+       for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+               if (rdev->ring[i].ring_obj)
+                       mutex_unlock(&rdev->ring[i].mutex);
+       }
        mutex_unlock(&rdev->vram_mutex);
        mutex_unlock(&rdev->ddev->struct_mutex);
 }
@@ -795,19 +802,14 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
        mutex_lock(&rdev->pm.mutex);
        if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
-               unsigned long irq_flags;
                int not_processed = 0;
+               int i;
 
-               read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-               if (!list_empty(&rdev->fence_drv.emited)) {
-                       struct list_head *ptr;
-                       list_for_each(ptr, &rdev->fence_drv.emited) {
-                               /* count up to 3, that's enought info */
-                               if (++not_processed >= 3)
-                                       break;
-                       }
+               for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+                       not_processed += radeon_fence_count_emitted(rdev, i);
+                       if (not_processed >= 3)
+                               break;
                }
-               read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 
                if (not_processed >= 3) { /* should upclock */
                        if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
index 49d5820..e8bc709 100644 (file)
@@ -34,6 +34,7 @@
 #include "atom.h"
 
 int radeon_debugfs_ib_init(struct radeon_device *rdev);
+int radeon_debugfs_ring_init(struct radeon_device *rdev);
 
 u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 {
@@ -60,105 +61,106 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
        return idx_value;
 }
 
-void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 {
 #if DRM_DEBUG_CODE
-       if (rdev->cp.count_dw <= 0) {
+       if (ring->count_dw <= 0) {
                DRM_ERROR("radeon: writting more dword to ring than expected !\n");
        }
 #endif
-       rdev->cp.ring[rdev->cp.wptr++] = v;
-       rdev->cp.wptr &= rdev->cp.ptr_mask;
-       rdev->cp.count_dw--;
-       rdev->cp.ring_free_dw--;
+       ring->ring[ring->wptr++] = v;
+       ring->wptr &= ring->ptr_mask;
+       ring->count_dw--;
+       ring->ring_free_dw--;
 }
 
-void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
-{
-       struct radeon_ib *ib, *n;
-
-       list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
-               list_del(&ib->list);
-               vfree(ib->ptr);
-               kfree(ib);
-       }
-}
-
-void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+/*
+ * IB.
+ */
+bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-       struct radeon_ib *bib;
-
-       bib = kmalloc(sizeof(*bib), GFP_KERNEL);
-       if (bib == NULL)
-               return;
-       bib->ptr = vmalloc(ib->length_dw * 4);
-       if (bib->ptr == NULL) {
-               kfree(bib);
-               return;
+       bool done = false;
+
+       /* only free IBs which have been emitted */
+       if (ib->fence && ib->fence->emitted) {
+               if (radeon_fence_signaled(ib->fence)) {
+                       radeon_fence_unref(&ib->fence);
+                       radeon_sa_bo_free(rdev, &ib->sa_bo);
+                       done = true;
+               }
        }
-       memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
-       bib->length_dw = ib->length_dw;
-       mutex_lock(&rdev->ib_pool.mutex);
-       list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
-       mutex_unlock(&rdev->ib_pool.mutex);
+       return done;
 }
 
-/*
- * IB.
- */
-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+                 struct radeon_ib **ib, unsigned size)
 {
        struct radeon_fence *fence;
-       struct radeon_ib *nib;
-       int r = 0, i, c;
+       unsigned cretry = 0;
+       int r = 0, i, idx;
 
        *ib = NULL;
-       r = radeon_fence_create(rdev, &fence);
+       /* align size on 256 bytes */
+       size = ALIGN(size, 256);
+
+       r = radeon_fence_create(rdev, &fence, ring);
        if (r) {
                dev_err(rdev->dev, "failed to create fence for new IB\n");
                return r;
        }
+
        mutex_lock(&rdev->ib_pool.mutex);
-       for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
-               i &= (RADEON_IB_POOL_SIZE - 1);
-               if (rdev->ib_pool.ibs[i].free) {
-                       nib = &rdev->ib_pool.ibs[i];
-                       break;
-               }
-       }
-       if (nib == NULL) {
-               /* This should never happen, it means we allocated all
-                * IB and haven't scheduled one yet, return EBUSY to
-                * userspace hoping that on ioctl recall we get better
-                * luck
-                */
-               dev_err(rdev->dev, "no free indirect buffer !\n");
+       idx = rdev->ib_pool.head_id;
+retry:
+       if (cretry > 5) {
+               dev_err(rdev->dev, "failed to get an ib after 5 retries\n");
                mutex_unlock(&rdev->ib_pool.mutex);
                radeon_fence_unref(&fence);
-               return -EBUSY;
+               return -ENOMEM;
        }
-       rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
-       nib->free = false;
-       if (nib->fence) {
-               mutex_unlock(&rdev->ib_pool.mutex);
-               r = radeon_fence_wait(nib->fence, false);
-               if (r) {
-                       dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
-                               nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
-                       mutex_lock(&rdev->ib_pool.mutex);
-                       nib->free = true;
-                       mutex_unlock(&rdev->ib_pool.mutex);
-                       radeon_fence_unref(&fence);
-                       return r;
+       cretry++;
+       for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+               radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
+               if (rdev->ib_pool.ibs[idx].fence == NULL) {
+                       r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
+                                            &rdev->ib_pool.ibs[idx].sa_bo,
+                                            size, 256);
+                       if (!r) {
+                               *ib = &rdev->ib_pool.ibs[idx];
+                               (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+                               (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
+                               (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+                               (*ib)->gpu_addr += (*ib)->sa_bo.offset;
+                               (*ib)->fence = fence;
+                               (*ib)->vm_id = 0;
+                               /* IBs are most likely to be allocated in a ring fashion,
+                                * thus rdev->ib_pool.head_id should be the id of the
+                                * oldest IB
+                                */
+                               rdev->ib_pool.head_id = (1 + idx);
+                               rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
+                               mutex_unlock(&rdev->ib_pool.mutex);
+                               return 0;
+                       }
                }
-               mutex_lock(&rdev->ib_pool.mutex);
+               idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+       }
+       /* this should be a rare event, i.e. all IBs scheduled but none
+        * have signaled yet.
+        */
+       for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+               if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
+                       r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
+                       if (!r) {
+                               goto retry;
+                       }
+                       /* an error happened */
+                       break;
+               }
+               idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
        }
-       radeon_fence_unref(&nib->fence);
-       nib->fence = fence;
-       nib->length_dw = 0;
        mutex_unlock(&rdev->ib_pool.mutex);
-       *ib = nib;
-       return 0;
+       radeon_fence_unref(&fence);
+       return r;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -169,247 +171,255 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
        if (tmp == NULL) {
                return;
        }
-       if (!tmp->fence->emited)
-               radeon_fence_unref(&tmp->fence);
        mutex_lock(&rdev->ib_pool.mutex);
-       tmp->free = true;
+       if (tmp->fence && !tmp->fence->emitted) {
+               radeon_sa_bo_free(rdev, &tmp->sa_bo);
+               radeon_fence_unref(&tmp->fence);
+       }
        mutex_unlock(&rdev->ib_pool.mutex);
 }
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
+       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
        int r = 0;
 
-       if (!ib->length_dw || !rdev->cp.ready) {
+       if (!ib->length_dw || !ring->ready) {
                /* TODO: nothing is in the IB; we should report this. */
                DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
                return -EINVAL;
        }
 
        /* 64 dwords should be enough for fence too */
-       r = radeon_ring_lock(rdev, 64);
+       r = radeon_ring_lock(rdev, ring, 64);
        if (r) {
                DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
                return r;
        }
-       radeon_ring_ib_execute(rdev, ib);
+       radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
        radeon_fence_emit(rdev, ib->fence);
-       mutex_lock(&rdev->ib_pool.mutex);
-       /* once scheduled IB is considered free and protected by the fence */
-       ib->free = true;
-       mutex_unlock(&rdev->ib_pool.mutex);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_unlock_commit(rdev, ring);
        return 0;
 }
 
 int radeon_ib_pool_init(struct radeon_device *rdev)
 {
-       void *ptr;
-       uint64_t gpu_addr;
-       int i;
-       int r = 0;
+       int i, r;
 
-       if (rdev->ib_pool.robj)
+       mutex_lock(&rdev->ib_pool.mutex);
+       if (rdev->ib_pool.ready) {
+               mutex_unlock(&rdev->ib_pool.mutex);
                return 0;
-       INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
-       /* Allocate 1M object buffer */
-       r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
-                            PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
-                            &rdev->ib_pool.robj);
-       if (r) {
-               DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
-               return r;
        }
-       r = radeon_bo_reserve(rdev->ib_pool.robj, false);
-       if (unlikely(r != 0))
-               return r;
-       r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
-       if (r) {
-               radeon_bo_unreserve(rdev->ib_pool.robj);
-               DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
-               return r;
-       }
-       r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
-       radeon_bo_unreserve(rdev->ib_pool.robj);
+
+       r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
+                                     RADEON_IB_POOL_SIZE*64*1024,
+                                     RADEON_GEM_DOMAIN_GTT);
        if (r) {
-               DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
+               mutex_unlock(&rdev->ib_pool.mutex);
                return r;
        }
-       for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-               unsigned offset;
 
-               offset = i * 64 * 1024;
-               rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
-               rdev->ib_pool.ibs[i].ptr = ptr + offset;
+       for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+               rdev->ib_pool.ibs[i].fence = NULL;
                rdev->ib_pool.ibs[i].idx = i;
                rdev->ib_pool.ibs[i].length_dw = 0;
-               rdev->ib_pool.ibs[i].free = true;
+               INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
        }
        rdev->ib_pool.head_id = 0;
        rdev->ib_pool.ready = true;
        DRM_INFO("radeon: ib pool ready.\n");
+
        if (radeon_debugfs_ib_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for IB !\n");
        }
-       return r;
+       if (radeon_debugfs_ring_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for rings !\n");
+       }
+       mutex_unlock(&rdev->ib_pool.mutex);
+       return 0;
 }
 
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
-       int r;
-       struct radeon_bo *robj;
+       unsigned i;
 
-       if (!rdev->ib_pool.ready) {
-               return;
-       }
        mutex_lock(&rdev->ib_pool.mutex);
-       radeon_ib_bogus_cleanup(rdev);
-       robj = rdev->ib_pool.robj;
-       rdev->ib_pool.robj = NULL;
-       mutex_unlock(&rdev->ib_pool.mutex);
-
-       if (robj) {
-               r = radeon_bo_reserve(robj, false);
-               if (likely(r == 0)) {
-                       radeon_bo_kunmap(robj);
-                       radeon_bo_unpin(robj);
-                       radeon_bo_unreserve(robj);
+       if (rdev->ib_pool.ready) {
+               for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+                       radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
+                       radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
                }
-               radeon_bo_unref(&robj);
+               radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
+               rdev->ib_pool.ready = false;
        }
+       mutex_unlock(&rdev->ib_pool.mutex);
 }
 
+int radeon_ib_pool_start(struct radeon_device *rdev)
+{
+       return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+}
+
+int radeon_ib_pool_suspend(struct radeon_device *rdev)
+{
+       return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+}
 
 /*
  * Ring.
  */
-void radeon_ring_free_size(struct radeon_device *rdev)
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-       if (rdev->wb.enabled)
-               rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
-       else {
-               if (rdev->family >= CHIP_R600)
-                       rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
-               else
-                       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+       /* r1xx-r5xx only has CP ring */
+       if (rdev->family < CHIP_R600)
+               return RADEON_RING_TYPE_GFX_INDEX;
+
+       if (rdev->family >= CHIP_CAYMAN) {
+               if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
+                       return CAYMAN_RING_TYPE_CP1_INDEX;
+               else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
+                       return CAYMAN_RING_TYPE_CP2_INDEX;
        }
+       return RADEON_RING_TYPE_GFX_INDEX;
+}
+
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       u32 rptr;
+
+       if (rdev->wb.enabled)
+               rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+       else
+               rptr = RREG32(ring->rptr_reg);
+       ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
        /* This works because ring_size is a power of 2 */
-       rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
-       rdev->cp.ring_free_dw -= rdev->cp.wptr;
-       rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
-       if (!rdev->cp.ring_free_dw) {
-               rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+       ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
+       ring->ring_free_dw -= ring->wptr;
+       ring->ring_free_dw &= ring->ptr_mask;
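+       /* e.g. with 1024 ring dwords (ptr_mask == 1023), rptr == 100 and
+        * wptr == 900: (100 + 1024 - 900) & 1023 == 224 free dwords; an
+        * rptr == wptr result of 0 is treated below as a completely free
+        * ring (numbers purely illustrative)
+        */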
+       if (!ring->ring_free_dw) {
+               ring->ring_free_dw = ring->ring_size / 4;
        }
 }
 
-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
+
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
 {
        int r;
 
        /* Align requested size with padding so unlock_commit can
         * pad safely */
-       ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
-       while (ndw > (rdev->cp.ring_free_dw - 1)) {
-               radeon_ring_free_size(rdev);
-               if (ndw < rdev->cp.ring_free_dw) {
+       ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+       while (ndw > (ring->ring_free_dw - 1)) {
+               radeon_ring_free_size(rdev, ring);
+               if (ndw < ring->ring_free_dw) {
                        break;
                }
-               r = radeon_fence_wait_next(rdev);
+               r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
                if (r)
                        return r;
        }
-       rdev->cp.count_dw = ndw;
-       rdev->cp.wptr_old = rdev->cp.wptr;
+       ring->count_dw = ndw;
+       ring->wptr_old = ring->wptr;
        return 0;
 }
 
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
 {
        int r;
 
-       mutex_lock(&rdev->cp.mutex);
-       r = radeon_ring_alloc(rdev, ndw);
+       mutex_lock(&ring->mutex);
+       r = radeon_ring_alloc(rdev, ring, ndw);
        if (r) {
-               mutex_unlock(&rdev->cp.mutex);
+               mutex_unlock(&ring->mutex);
                return r;
        }
        return 0;
 }
 
-void radeon_ring_commit(struct radeon_device *rdev)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        unsigned count_dw_pad;
        unsigned i;
 
        /* We pad to match fetch size */
-       count_dw_pad = (rdev->cp.align_mask + 1) -
-                      (rdev->cp.wptr & rdev->cp.align_mask);
+       count_dw_pad = (ring->align_mask + 1) -
+                      (ring->wptr & ring->align_mask);
        for (i = 0; i < count_dw_pad; i++) {
-               radeon_ring_write(rdev, 2 << 30);
+               radeon_ring_write(ring, ring->nop);
        }
        DRM_MEMORYBARRIER();
-       radeon_cp_commit(rdev);
+       WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+       (void)RREG32(ring->wptr_reg);
 }
 
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-       radeon_ring_commit(rdev);
-       mutex_unlock(&rdev->cp.mutex);
+       radeon_ring_commit(rdev, ring);
+       mutex_unlock(&ring->mutex);
 }
 
-void radeon_ring_unlock_undo(struct radeon_device *rdev)
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-       rdev->cp.wptr = rdev->cp.wptr_old;
-       mutex_unlock(&rdev->cp.mutex);
+       ring->wptr = ring->wptr_old;
+       mutex_unlock(&ring->mutex);
 }
 
-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
+                    unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+                    u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
 {
        int r;
 
-       rdev->cp.ring_size = ring_size;
+       ring->ring_size = ring_size;
+       ring->rptr_offs = rptr_offs;
+       ring->rptr_reg = rptr_reg;
+       ring->wptr_reg = wptr_reg;
+       ring->ptr_reg_shift = ptr_reg_shift;
+       ring->ptr_reg_mask = ptr_reg_mask;
+       ring->nop = nop;
        /* Allocate ring buffer */
-       if (rdev->cp.ring_obj == NULL) {
-               r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
+       if (ring->ring_obj == NULL) {
+               r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
                                        RADEON_GEM_DOMAIN_GTT,
-                                       &rdev->cp.ring_obj);
+                                       &ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
-               r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+               r = radeon_bo_reserve(ring->ring_obj, false);
                if (unlikely(r != 0))
                        return r;
-               r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
-                                       &rdev->cp.gpu_addr);
+               r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
+                                       &ring->gpu_addr);
                if (r) {
-                       radeon_bo_unreserve(rdev->cp.ring_obj);
+                       radeon_bo_unreserve(ring->ring_obj);
                        dev_err(rdev->dev, "(%d) ring pin failed\n", r);
                        return r;
                }
-               r = radeon_bo_kmap(rdev->cp.ring_obj,
-                                      (void **)&rdev->cp.ring);
-               radeon_bo_unreserve(rdev->cp.ring_obj);
+               r = radeon_bo_kmap(ring->ring_obj,
+                                      (void **)&ring->ring);
+               radeon_bo_unreserve(ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring map failed\n", r);
                        return r;
                }
        }
-       rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
-       rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+       ring->ptr_mask = (ring->ring_size / 4) - 1;
+       ring->ring_free_dw = ring->ring_size / 4;
        return 0;
 }
 
-void radeon_ring_fini(struct radeon_device *rdev)
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
 {
        int r;
        struct radeon_bo *ring_obj;
 
-       mutex_lock(&rdev->cp.mutex);
-       ring_obj = rdev->cp.ring_obj;
-       rdev->cp.ring = NULL;
-       rdev->cp.ring_obj = NULL;
-       mutex_unlock(&rdev->cp.mutex);
+       mutex_lock(&ring->mutex);
+       ring_obj = ring->ring_obj;
+       ring->ring = NULL;
+       ring->ring_obj = NULL;
+       mutex_unlock(&ring->mutex);
 
        if (ring_obj) {
                r = radeon_bo_reserve(ring_obj, false);
@@ -422,72 +432,83 @@ void radeon_ring_fini(struct radeon_device *rdev)
        }
 }
 
-
 /*
  * Debugfs info
  */
 #if defined(CONFIG_DEBUG_FS)
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+
+static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct radeon_ib *ib = node->info_ent->data;
-       unsigned i;
-
-       if (ib == NULL) {
-               return 0;
-       }
-       seq_printf(m, "IB %04u\n", ib->idx);
-       seq_printf(m, "IB fence %p\n", ib->fence);
-       seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
-       for (i = 0; i < ib->length_dw; i++) {
-               seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+       struct drm_device *dev = node->minor->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       int ridx = *(int*)node->info_ent->data;
+       struct radeon_ring *ring = &rdev->ring[ridx];
+       unsigned count, i, j;
+
+       radeon_ring_free_size(rdev, ring);
+       count = (ring->ring_size / 4) - ring->ring_free_dw;
+       seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
+       seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
+       seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
+       seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
+       seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+       seq_printf(m, "%u dwords in ring\n", count);
+       i = ring->rptr;
+       for (j = 0; j <= count; j++) {
+               seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+               i = (i + 1) & ring->ptr_mask;
        }
        return 0;
 }
 
-static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
+static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+
+static struct drm_info_list radeon_debugfs_ring_info_list[] = {
+       {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
+       {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
+       {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
+};
+
+static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct radeon_device *rdev = node->info_ent->data;
-       struct radeon_ib *ib;
+       struct radeon_ib *ib = node->info_ent->data;
        unsigned i;
 
-       mutex_lock(&rdev->ib_pool.mutex);
-       if (list_empty(&rdev->ib_pool.bogus_ib)) {
-               mutex_unlock(&rdev->ib_pool.mutex);
-               seq_printf(m, "no bogus IB recorded\n");
+       if (ib == NULL) {
                return 0;
        }
-       ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
-       list_del_init(&ib->list);
-       mutex_unlock(&rdev->ib_pool.mutex);
+       seq_printf(m, "IB %04u\n", ib->idx);
+       seq_printf(m, "IB fence %p\n", ib->fence);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
                seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
        }
-       vfree(ib->ptr);
-       kfree(ib);
        return 0;
 }
 
 static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
 static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+#endif
 
-static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
-       {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
-};
+int radeon_debugfs_ring_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
+                                       ARRAY_SIZE(radeon_debugfs_ring_info_list));
+#else
+       return 0;
 #endif
+}
 
 int radeon_debugfs_ib_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
        unsigned i;
-       int r;
 
-       radeon_debugfs_ib_bogus_info_list[0].data = rdev;
-       r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
-       if (r)
-               return r;
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
                radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
new file mode 100644 (file)
index 0000000..4cce47e
--- /dev/null
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon.h"
+
+int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+                             struct radeon_sa_manager *sa_manager,
+                             unsigned size, u32 domain)
+{
+       int r;
+
+       sa_manager->bo = NULL;
+       sa_manager->size = size;
+       sa_manager->domain = domain;
+       INIT_LIST_HEAD(&sa_manager->sa_bo);
+
+       r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
+                            RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
+       if (r) {
+               dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
+               return r;
+       }
+
+       return r;
+}
+
+void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+                              struct radeon_sa_manager *sa_manager)
+{
+       struct radeon_sa_bo *sa_bo, *tmp;
+
+       if (!list_empty(&sa_manager->sa_bo)) {
+               dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+       }
+       list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
+               list_del_init(&sa_bo->list);
+       }
+       radeon_bo_unref(&sa_manager->bo);
+       sa_manager->size = 0;
+}
+
+int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+                              struct radeon_sa_manager *sa_manager)
+{
+       int r;
+
+       if (sa_manager->bo == NULL) {
+               dev_err(rdev->dev, "no bo for sa manager\n");
+               return -EINVAL;
+       }
+
+       /* map the buffer */
+       r = radeon_bo_reserve(sa_manager->bo, false);
+       if (r) {
+               dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
+               return r;
+       }
+       r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
+       if (r) {
+               radeon_bo_unreserve(sa_manager->bo);
+               dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
+               return r;
+       }
+       r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+       radeon_bo_unreserve(sa_manager->bo);
+       return r;
+}
+
+int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+                                struct radeon_sa_manager *sa_manager)
+{
+       int r;
+
+       if (sa_manager->bo == NULL) {
+               dev_err(rdev->dev, "no bo for sa manager\n");
+               return -EINVAL;
+       }
+
+       r = radeon_bo_reserve(sa_manager->bo, false);
+       if (!r) {
+               radeon_bo_kunmap(sa_manager->bo);
+               radeon_bo_unpin(sa_manager->bo);
+               radeon_bo_unreserve(sa_manager->bo);
+       }
+       return r;
+}
+
+/*
+ * The principle is simple: we keep a list of sub allocations in offset
+ * order (first entry has offset == 0, last entry has the highest
+ * offset).
+ *
+ * When allocating a new object we first check if there is room at
+ * the end, i.e. total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so we allocate the new object there.
+ *
+ * When there is not enough room at the end, we start waiting for
+ * each sub object until we reach object_offset + object_size >=
+ * alloc_size; that object then becomes the sub object we return.
+ *
+ * Alignment can't be bigger than the page size.
+ */
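+/*
+ * Illustrative example (sizes made up): with a 1 MiB manager holding
+ * allocations at [0, 64K) and [64K, 128K), a 32K request with 256 byte
+ * alignment finds no hole between the entries and lands at the end,
+ * offset 128K; if less than 32K remained past the last entry the
+ * allocation would fail with -ENOMEM.
+ */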
+int radeon_sa_bo_new(struct radeon_device *rdev,
+                    struct radeon_sa_manager *sa_manager,
+                    struct radeon_sa_bo *sa_bo,
+                    unsigned size, unsigned align)
+{
+       struct radeon_sa_bo *tmp;
+       struct list_head *head;
+       unsigned offset = 0, wasted = 0;
+
+       BUG_ON(align > RADEON_GPU_PAGE_SIZE);
+       BUG_ON(size > sa_manager->size);
+
+       /* no allocations yet? take offset 0 at the list head */
+       head = sa_manager->sa_bo.prev;
+       if (list_empty(&sa_manager->sa_bo)) {
+               goto out;
+       }
+
+       /* look for a hole big enough */
+       offset = 0;
+       list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
+               /* room before this object ? */
+               if ((tmp->offset - offset) >= size) {
+                       head = tmp->list.prev;
+                       goto out;
+               }
+               offset = tmp->offset + tmp->size;
+               wasted = offset % align;
+               if (wasted) {
+                       wasted = align - wasted;
+               }
+               offset += wasted;
+       }
+       /* room at the end ? */
+       head = sa_manager->sa_bo.prev;
+       tmp = list_entry(head, struct radeon_sa_bo, list);
+       offset = tmp->offset + tmp->size;
+       wasted = offset % align;
+       if (wasted) {
+               wasted = align - wasted;
+       }
+       offset += wasted;
+       if ((sa_manager->size - offset) < size) {
+               /* failed to find something big enough */
+               return -ENOMEM;
+       }
+
+out:
+       sa_bo->manager = sa_manager;
+       sa_bo->offset = offset;
+       sa_bo->size = size;
+       list_add(&sa_bo->list, head);
+       return 0;
+}
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
+{
+       list_del_init(&sa_bo->list);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
new file mode 100644 (file)
index 0000000..61dd4e3
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <deathsimple@vodafone.de>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon.h"
+
+static int radeon_semaphore_add_bo(struct radeon_device *rdev)
+{
+       struct radeon_semaphore_bo *bo;
+       unsigned long irq_flags;
+       uint64_t gpu_addr;
+       uint32_t *cpu_ptr;
+       int r, i;
+
+
+       bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
+       if (bo == NULL) {
+               return -ENOMEM;
+       }
+       INIT_LIST_HEAD(&bo->free);
+       INIT_LIST_HEAD(&bo->list);
+       bo->nused = 0;
+
+       r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
+       if (r) {
+               dev_err(rdev->dev, "failed to get an ib for the semaphore bo\n");
+               kfree(bo);
+               return r;
+       }
+       gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+       gpu_addr += bo->ib->sa_bo.offset;
+       cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+       cpu_ptr += (bo->ib->sa_bo.offset >> 2);
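+       /* carve the ib's sub-allocation into 8-byte semaphores: each one
+        * is two 32-bit words, hence gpu_addr advances by 8 and cpu_ptr
+        * by 2 per entry
+        */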
+       for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
+               bo->semaphores[i].gpu_addr = gpu_addr;
+               bo->semaphores[i].cpu_ptr = cpu_ptr;
+               bo->semaphores[i].bo = bo;
+               list_add_tail(&bo->semaphores[i].list, &bo->free);
+               gpu_addr += 8;
+               cpu_ptr += 2;
+       }
+       write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+       list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
+       write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+       return 0;
+}
+
+static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
+                                          struct radeon_semaphore_bo *bo)
+{
+       radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
+       radeon_fence_unref(&bo->ib->fence);
+       list_del(&bo->list);
+       kfree(bo);
+}
+
+void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
+{
+       struct radeon_semaphore_bo *bo, *n;
+
+       if (list_empty(&rdev->semaphore_drv.bo)) {
+               return;
+       }
+       /* only shrink if the first bo has a free semaphore */
+       bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
+       if (list_empty(&bo->free)) {
+               return;
+       }
+       list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
+               if (bo->nused)
+                       continue;
+               radeon_semaphore_del_bo_locked(rdev, bo);
+       }
+}
+
+int radeon_semaphore_create(struct radeon_device *rdev,
+                           struct radeon_semaphore **semaphore)
+{
+       struct radeon_semaphore_bo *bo;
+       unsigned long irq_flags;
+       bool do_retry = true;
+       int r;
+
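+       /* take a free semaphore from an existing bo; if none is available,
+        * allocate one more backing bo and retry exactly once
+        */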
+retry:
+       *semaphore = NULL;
+       write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+       list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
+               if (list_empty(&bo->free))
+                       continue;
+               *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
+               (*semaphore)->cpu_ptr[0] = 0;
+               (*semaphore)->cpu_ptr[1] = 0;
+               list_del(&(*semaphore)->list);
+               bo->nused++;
+               break;
+       }
+       write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+
+       if (*semaphore == NULL) {
+               if (do_retry) {
+                       do_retry = false;
+                       r = radeon_semaphore_add_bo(rdev);
+                       if (r)
+                               return r;
+                       goto retry;
+               }
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+                                 struct radeon_semaphore *semaphore)
+{
+       radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+}
+
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+                               struct radeon_semaphore *semaphore)
+{
+       radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+}
+
+void radeon_semaphore_free(struct radeon_device *rdev,
+                          struct radeon_semaphore *semaphore)
+{
+       unsigned long irq_flags;
+
+       write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+       semaphore->bo->nused--;
+       list_add_tail(&semaphore->list, &semaphore->bo->free);
+       radeon_semaphore_shrink_locked(rdev);
+       write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+}
+
+void radeon_semaphore_driver_fini(struct radeon_device *rdev)
+{
+       struct radeon_semaphore_bo *bo, *n;
+       unsigned long irq_flags;
+
+       write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+       /* forcibly free everything */
+       list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
+               if (!list_empty(&bo->free)) {
+                       dev_err(rdev->dev, "semaphore still in use\n");
+               }
+               radeon_semaphore_del_bo_locked(rdev, bo);
+       }
+       write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+}
index 602fa35..dc5dcf4 100644 (file)
@@ -42,7 +42,9 @@ void radeon_test_moves(struct radeon_device *rdev)
        /* Number of tests =
         * (Total GTT - IB pool - writeback page - ring buffers) / test size
         */
-       n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
+       n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
+       for (i = 0; i < RADEON_NUM_RINGS; ++i)
+               n -= rdev->ring[i].ring_size;
        if (rdev->wb.wb_obj)
                n -= RADEON_GPU_PAGE_SIZE;
        if (rdev->ih.ring_obj)
@@ -104,7 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 
                radeon_bo_kunmap(gtt_obj[i]);
 
-               r = radeon_fence_create(rdev, &fence);
+               r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
                if (r) {
                        DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
                        goto out_cleanup;
@@ -153,7 +155,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 
                radeon_bo_kunmap(vram_obj);
 
-               r = radeon_fence_create(rdev, &fence);
+               r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
                if (r) {
                        DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
                        goto out_cleanup;
@@ -232,3 +234,264 @@ out_cleanup:
                printk(KERN_WARNING "Error while testing BO move.\n");
        }
 }
+
+void radeon_test_ring_sync(struct radeon_device *rdev,
+                          struct radeon_ring *ringA,
+                          struct radeon_ring *ringB)
+{
+       struct radeon_fence *fence1 = NULL, *fence2 = NULL;
+       struct radeon_semaphore *semaphore = NULL;
+       int ridxA = radeon_ring_index(rdev, ringA);
+       int ridxB = radeon_ring_index(rdev, ringB);
+       int r;
+
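+       /* test outline: queue two semaphore waits with fences on ring A,
+        * check that the fences do not signal prematurely, then release
+        * the waits one at a time by signaling the semaphore from ring B
+        */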
+       r = radeon_fence_create(rdev, &fence1, ridxA);
+       if (r) {
+               DRM_ERROR("Failed to create sync fence 1\n");
+               goto out_cleanup;
+       }
+       r = radeon_fence_create(rdev, &fence2, ridxA);
+       if (r) {
+               DRM_ERROR("Failed to create sync fence 2\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_semaphore_create(rdev, &semaphore);
+       if (r) {
+               DRM_ERROR("Failed to create semaphore\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_ring_lock(rdev, ringA, 64);
+       if (r) {
+               DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+       radeon_fence_emit(rdev, fence1);
+       radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+       radeon_fence_emit(rdev, fence2);
+       radeon_ring_unlock_commit(rdev, ringA);
+
+       mdelay(1000);
+
+       if (radeon_fence_signaled(fence1)) {
+               DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_ring_lock(rdev, ringB, 64);
+       if (r) {
+               DRM_ERROR("Failed to lock ring B %p\n", ringB);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+       radeon_ring_unlock_commit(rdev, ringB);
+
+       r = radeon_fence_wait(fence1, false);
+       if (r) {
+               DRM_ERROR("Failed to wait for sync fence 1\n");
+               goto out_cleanup;
+       }
+
+       mdelay(1000);
+
+       if (radeon_fence_signaled(fence2)) {
+               DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_ring_lock(rdev, ringB, 64);
+       if (r) {
+               DRM_ERROR("Failed to lock ring B %p\n", ringB);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+       radeon_ring_unlock_commit(rdev, ringB);
+
+       r = radeon_fence_wait(fence2, false);
+       if (r) {
+               DRM_ERROR("Failed to wait for sync fence 2\n");
+               goto out_cleanup;
+       }
+
+out_cleanup:
+       if (semaphore)
+               radeon_semaphore_free(rdev, semaphore);
+
+       if (fence1)
+               radeon_fence_unref(&fence1);
+
+       if (fence2)
+               radeon_fence_unref(&fence2);
+
+       if (r)
+               printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
+void radeon_test_ring_sync2(struct radeon_device *rdev,
+                           struct radeon_ring *ringA,
+                           struct radeon_ring *ringB,
+                           struct radeon_ring *ringC)
+{
+       struct radeon_fence *fenceA = NULL, *fenceB = NULL;
+       struct radeon_semaphore *semaphore = NULL;
+       int ridxA = radeon_ring_index(rdev, ringA);
+       int ridxB = radeon_ring_index(rdev, ringB);
+       int ridxC = radeon_ring_index(rdev, ringC);
+       bool sigA, sigB;
+       int i, r;
+
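+       /* test outline: rings A and B each wait on the same semaphore;
+        * ring C signals it twice and exactly one of the two fences is
+        * expected to complete per signal
+        */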
+       r = radeon_fence_create(rdev, &fenceA, ridxA);
+       if (r) {
+               DRM_ERROR("Failed to create sync fence A\n");
+               goto out_cleanup;
+       }
+       r = radeon_fence_create(rdev, &fenceB, ridxB);
+       if (r) {
+               DRM_ERROR("Failed to create sync fence B\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_semaphore_create(rdev, &semaphore);
+       if (r) {
+               DRM_ERROR("Failed to create semaphore\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_ring_lock(rdev, ringA, 64);
+       if (r) {
+               DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+       radeon_fence_emit(rdev, fenceA);
+       radeon_ring_unlock_commit(rdev, ringA);
+
+       r = radeon_ring_lock(rdev, ringB, 64);
+       if (r) {
+               DRM_ERROR("Failed to lock ring B %d\n", ridxB);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
+       radeon_fence_emit(rdev, fenceB);
+       radeon_ring_unlock_commit(rdev, ringB);
+
+       mdelay(1000);
+
+       if (radeon_fence_signaled(fenceA)) {
+               DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+               goto out_cleanup;
+       }
+       if (radeon_fence_signaled(fenceB)) {
+               DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
+               goto out_cleanup;
+       }
+
+       r = radeon_ring_lock(rdev, ringC, 64);
+       if (r) {
+               DRM_ERROR("Failed to lock ring C %p\n", ringC);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+       radeon_ring_unlock_commit(rdev, ringC);
+
+       for (i = 0; i < 30; ++i) {
+               mdelay(100);
+               sigA = radeon_fence_signaled(fenceA);
+               sigB = radeon_fence_signaled(fenceB);
+               if (sigA || sigB)
+                       break;
+       }
+
+       if (!sigA && !sigB) {
+               DRM_ERROR("Neither fence A nor B has been signaled\n");
+               goto out_cleanup;
+       } else if (sigA && sigB) {
+               DRM_ERROR("Both fence A and B have been signaled\n");
+               goto out_cleanup;
+       }
+
+       DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
+
+       r = radeon_ring_lock(rdev, ringC, 64);
+       if (r) {
+               DRM_ERROR("Failed to lock ring C %p\n", ringC);
+               goto out_cleanup;
+       }
+       radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+       radeon_ring_unlock_commit(rdev, ringC);
+
+       mdelay(1000);
+
+       r = radeon_fence_wait(fenceA, false);
+       if (r) {
+               DRM_ERROR("Failed to wait for sync fence A\n");
+               goto out_cleanup;
+       }
+       r = radeon_fence_wait(fenceB, false);
+       if (r) {
+               DRM_ERROR("Failed to wait for sync fence B\n");
+               goto out_cleanup;
+       }
+
+out_cleanup:
+       if (semaphore)
+               radeon_semaphore_free(rdev, semaphore);
+
+       if (fenceA)
+               radeon_fence_unref(&fenceA);
+
+       if (fenceB)
+               radeon_fence_unref(&fenceB);
+
+       if (r)
+               printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
+void radeon_test_syncing(struct radeon_device *rdev)
+{
+       int i, j, k;
+
+       for (i = 1; i < RADEON_NUM_RINGS; ++i) {
+               struct radeon_ring *ringA = &rdev->ring[i];
+               if (!ringA->ready)
+                       continue;
+
+               for (j = 0; j < i; ++j) {
+                       struct radeon_ring *ringB = &rdev->ring[j];
+                       if (!ringB->ready)
+                               continue;
+
+                       DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
+                       radeon_test_ring_sync(rdev, ringA, ringB);
+
+                       DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
+                       radeon_test_ring_sync(rdev, ringB, ringA);
+
+                       for (k = 0; k < j; ++k) {
+                               struct radeon_ring *ringC = &rdev->ring[k];
+                               if (!ringC->ready)
+                                       continue;
+
+                               DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
+                               radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
+
+                               DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
+                               radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
+
+                               DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
+                               radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
+
+                               DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
+                               radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
+
+                               DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
+                               radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
+
+                               DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
+                               radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
+                       }
+               }
+       }
+}
index 0b5468b..c421e77 100644 (file)
@@ -114,24 +114,6 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
        }
 }
 
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
-
-static struct ttm_backend*
-radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
-{
-       struct radeon_device *rdev;
-
-       rdev = radeon_get_rdev(bdev);
-#if __OS_HAS_AGP
-       if (rdev->flags & RADEON_IS_AGP) {
-               return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
-       } else
-#endif
-       {
-               return radeon_ttm_backend_create(rdev);
-       }
-}
-
 static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
        return 0;
@@ -206,7 +188,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
        rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
-               if (rbo->rdev->cp.ready == false)
+               if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
                else
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -241,10 +223,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
        struct radeon_fence *fence;
-       int r;
+       int r, i;
 
        rdev = radeon_get_rdev(bo->bdev);
-       r = radeon_fence_create(rdev, &fence);
+       r = radeon_fence_create(rdev, &fence, rdev->copy_ring);
        if (unlikely(r)) {
                return r;
        }
@@ -273,13 +255,43 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
                return -EINVAL;
        }
-       if (!rdev->cp.ready) {
-               DRM_ERROR("Trying to move memory with CP turned off.\n");
+       if (!rdev->ring[rdev->copy_ring].ready) {
+               DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }
 
        BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
+       /* sync other rings */
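+       /* on r600+ every other active ring signals the semaphore and the
+        * copy ring waits on it, so the blit is held back until the work
+        * already queued on those rings has reached this point
+        */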
+       if (rdev->family >= CHIP_R600) {
+               for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+                       /* no need to sync to our own or unused rings */
+                       if (i == rdev->copy_ring || !rdev->ring[i].ready)
+                               continue;
+
+                       if (!fence->semaphore) {
+                               r = radeon_semaphore_create(rdev, &fence->semaphore);
+                               /* FIXME: handle semaphore error */
+                               if (r)
+                                       continue;
+                       }
+
+                       r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
+                       /* FIXME: handle ring lock error */
+                       if (r)
+                               continue;
+                       radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
+                       radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
+
+                       r = radeon_ring_lock(rdev, &rdev->ring[rdev->copy_ring], 3);
+                       /* FIXME: handle ring lock error */
+                       if (r)
+                               continue;
+                       radeon_semaphore_emit_wait(rdev, rdev->copy_ring, fence->semaphore);
+                       radeon_ring_unlock_commit(rdev, &rdev->ring[rdev->copy_ring]);
+               }
+       }
+
        r = radeon_copy(rdev, old_start, new_start,
                        new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
                        fence);
@@ -398,7 +410,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
                radeon_move_null(bo, new_mem);
                return 0;
        }
-       if (!rdev->cp.ready || rdev->asic->copy == NULL) {
+       if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
                /* use memcpy */
                goto memcpy;
        }
@@ -515,8 +527,166 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
        return radeon_fence_signaled((struct radeon_fence *)sync_obj);
 }
 
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+       struct ttm_dma_tt               ttm;
+       struct radeon_device            *rdev;
+       u64                             offset;
+};
+
+static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+                                  struct ttm_mem_reg *bo_mem)
+{
+       struct radeon_ttm_tt *gtt = (void*)ttm;
+       int r;
+
+       gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+       if (!ttm->num_pages) {
+               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+                    ttm->num_pages, bo_mem, ttm);
+       }
+       r = radeon_gart_bind(gtt->rdev, gtt->offset,
+                            ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+       if (r) {
+               DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+                         ttm->num_pages, (unsigned)gtt->offset);
+               return r;
+       }
+       return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+
+       radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+       return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+
+       ttm_dma_tt_fini(&gtt->ttm);
+       kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+       .bind = &radeon_ttm_backend_bind,
+       .unbind = &radeon_ttm_backend_unbind,
+       .destroy = &radeon_ttm_backend_destroy,
+};
+
+struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+                                   unsigned long size, uint32_t page_flags,
+                                   struct page *dummy_read_page)
+{
+       struct radeon_device *rdev;
+       struct radeon_ttm_tt *gtt;
+
+       rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+       if (rdev->flags & RADEON_IS_AGP) {
+               return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
+                                        size, page_flags, dummy_read_page);
+       }
+#endif
+
+       gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
+       if (gtt == NULL) {
+               return NULL;
+       }
+       gtt->ttm.ttm.func = &radeon_backend_func;
+       gtt->rdev = rdev;
+       if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+               kfree(gtt);
+               return NULL;
+       }
+       return &gtt->ttm.ttm;
+}
+
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+{
+       struct radeon_device *rdev;
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+       unsigned i;
+       int r;
+
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+       if (rdev->flags & RADEON_IS_AGP) {
+               return ttm_agp_tt_populate(ttm);
+       }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+       if (swiotlb_nr_tbl()) {
+               return ttm_dma_populate(&gtt->ttm, rdev->dev);
+       }
+#endif
+
+       r = ttm_pool_populate(ttm);
+       if (r) {
+               return r;
+       }
+
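+       /* no SWIOTLB and not AGP: create the DMA mappings ourselves, one
+        * page at a time, unwinding on the first mapping failure
+        */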
+       for (i = 0; i < ttm->num_pages; i++) {
+               gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+                                                      0, PAGE_SIZE,
+                                                      PCI_DMA_BIDIRECTIONAL);
+               if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+                       while (i--) {
+                               pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+                               gtt->ttm.dma_address[i] = 0;
+                       }
+                       ttm_pool_unpopulate(ttm);
+                       return -EFAULT;
+               }
+       }
+       return 0;
+}
+
+static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+       struct radeon_device *rdev;
+       struct radeon_ttm_tt *gtt = (void *)ttm;
+       unsigned i;
+
+       rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+       if (rdev->flags & RADEON_IS_AGP) {
+               ttm_agp_tt_unpopulate(ttm);
+               return;
+       }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+       if (swiotlb_nr_tbl()) {
+               ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
+               return;
+       }
+#endif
+
+       for (i = 0; i < ttm->num_pages; i++) {
+               if (gtt->ttm.dma_address[i]) {
+                       pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+                                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+               }
+       }
+
+       ttm_pool_unpopulate(ttm);
+}
+
 static struct ttm_bo_driver radeon_bo_driver = {
-       .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
+       .ttm_tt_create = &radeon_ttm_tt_create,
+       .ttm_tt_populate = &radeon_ttm_tt_populate,
+       .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
        .invalidate_caches = &radeon_invalidate_caches,
        .init_mem_type = &radeon_init_mem_type,
        .evict_flags = &radeon_evict_flags,
@@ -680,124 +850,6 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 
 
-/*
- * TTM backend functions.
- */
-struct radeon_ttm_backend {
-       struct ttm_backend              backend;
-       struct radeon_device            *rdev;
-       unsigned long                   num_pages;
-       struct page                     **pages;
-       struct page                     *dummy_read_page;
-       dma_addr_t                      *dma_addrs;
-       bool                            populated;
-       bool                            bound;
-       unsigned                        offset;
-};
-
-static int radeon_ttm_backend_populate(struct ttm_backend *backend,
-                                      unsigned long num_pages,
-                                      struct page **pages,
-                                      struct page *dummy_read_page,
-                                      dma_addr_t *dma_addrs)
-{
-       struct radeon_ttm_backend *gtt;
-
-       gtt = container_of(backend, struct radeon_ttm_backend, backend);
-       gtt->pages = pages;
-       gtt->dma_addrs = dma_addrs;
-       gtt->num_pages = num_pages;
-       gtt->dummy_read_page = dummy_read_page;
-       gtt->populated = true;
-       return 0;
-}
-
-static void radeon_ttm_backend_clear(struct ttm_backend *backend)
-{
-       struct radeon_ttm_backend *gtt;
-
-       gtt = container_of(backend, struct radeon_ttm_backend, backend);
-       gtt->pages = NULL;
-       gtt->dma_addrs = NULL;
-       gtt->num_pages = 0;
-       gtt->dummy_read_page = NULL;
-       gtt->populated = false;
-       gtt->bound = false;
-}
-
-
-static int radeon_ttm_backend_bind(struct ttm_backend *backend,
-                                  struct ttm_mem_reg *bo_mem)
-{
-       struct radeon_ttm_backend *gtt;
-       int r;
-
-       gtt = container_of(backend, struct radeon_ttm_backend, backend);
-       gtt->offset = bo_mem->start << PAGE_SHIFT;
-       if (!gtt->num_pages) {
-               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
-                    gtt->num_pages, bo_mem, backend);
-       }
-       r = radeon_gart_bind(gtt->rdev, gtt->offset,
-                            gtt->num_pages, gtt->pages, gtt->dma_addrs);
-       if (r) {
-               DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
-                         gtt->num_pages, gtt->offset);
-               return r;
-       }
-       gtt->bound = true;
-       return 0;
-}
-
-static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
-{
-       struct radeon_ttm_backend *gtt;
-
-       gtt = container_of(backend, struct radeon_ttm_backend, backend);
-       radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
-       gtt->bound = false;
-       return 0;
-}
-
-static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
-{
-       struct radeon_ttm_backend *gtt;
-
-       gtt = container_of(backend, struct radeon_ttm_backend, backend);
-       if (gtt->bound) {
-               radeon_ttm_backend_unbind(backend);
-       }
-       kfree(gtt);
-}
-
-static struct ttm_backend_func radeon_backend_func = {
-       .populate = &radeon_ttm_backend_populate,
-       .clear = &radeon_ttm_backend_clear,
-       .bind = &radeon_ttm_backend_bind,
-       .unbind = &radeon_ttm_backend_unbind,
-       .destroy = &radeon_ttm_backend_destroy,
-};
-
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
-{
-       struct radeon_ttm_backend *gtt;
-
-       gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
-       if (gtt == NULL) {
-               return NULL;
-       }
-       gtt->backend.bdev = &rdev->mman.bdev;
-       gtt->backend.flags = 0;
-       gtt->backend.func = &radeon_backend_func;
-       gtt->rdev = rdev;
-       gtt->pages = NULL;
-       gtt->num_pages = 0;
-       gtt->dummy_read_page = NULL;
-       gtt->populated = false;
-       gtt->bound = false;
-       return &gtt->backend;
-}
-
 #define RADEON_DEBUGFS_MEM_TYPES 2
 
 #if defined(CONFIG_DEBUG_FS)
@@ -820,8 +872,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-       static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
-       static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
+       static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
+       static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
        unsigned i;
 
        for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -843,8 +895,17 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
        radeon_mem_types_list[i].name = radeon_mem_types_names[i];
        radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
        radeon_mem_types_list[i].driver_features = 0;
-       radeon_mem_types_list[i].data = NULL;
-       return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
+       radeon_mem_types_list[i++].data = NULL;
+#ifdef CONFIG_SWIOTLB
+       if (swiotlb_nr_tbl()) {
+               sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
+               radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+               radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
+               radeon_mem_types_list[i].driver_features = 0;
+               radeon_mem_types_list[i++].data = NULL;
+       }
+#endif
+       return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
 
 #endif
        return 0;
index 06b90c8..b0ce84a 100644 (file)
@@ -410,6 +410,12 @@ static int rs400_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -419,11 +425,18 @@ static int rs400_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
+
        return 0;
 }
 
@@ -447,11 +460,14 @@ int rs400_resume(struct radeon_device *rdev)
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return rs400_startup(rdev);
 }
 
 int rs400_suspend(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
@@ -530,7 +546,14 @@ int rs400_init(struct radeon_device *rdev)
        if (r)
                return r;
        r300_set_reg_safe(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = rs400_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
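
The same reshuffle repeats for rs600, rs690, rv515 and rv770 below: the IB pool is allocated once in the *_init() path before the first startup, each ring gets its fence driver started explicitly, and the IB test moves into *_startup() so a resume exercises it as well. A condensed sketch of that ordering, not part of the applied patch, with identifiers taken from the hunks and error paths abbreviated:

/*
 * Sketch only: the init/startup ordering introduced by this series.
 * Unrelated setup (GART, IRQ, CP) is elided.
 */
static int rs400_startup_sketch(struct radeon_device *rdev)
{
        int r;

        r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
        if (r)
                return r;
        /* ... IRQ enable and CP init elided ... */
        r = radeon_ib_pool_start(rdev);         /* (re)map the IB pool */
        if (r)
                return r;
        r = r100_ib_test(rdev);                 /* now also run on resume */
        if (r)
                rdev->accel_working = false;
        return r;
}

static int rs400_init_sketch(struct radeon_device *rdev)
{
        int r;

        /* ... ASIC setup elided ... */
        r = radeon_ib_pool_init(rdev);          /* allocate the pool exactly once */
        rdev->accel_working = true;
        if (r)
                rdev->accel_working = false;    /* continue with accel disabled */
        return rs400_startup_sketch(rdev);      /* resume re-sets accel_working
                                                 * and calls the same helper */
}
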
index b1053d6..803e0d3 100644 (file)
@@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
 
 void rs600_bm_disable(struct radeon_device *rdev)
 {
-       u32 tmp;
+       u16 tmp;
 
        /* disable bus mastering */
-       pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+       pci_read_config_word(rdev->pdev, 0x4, &tmp);
        pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
        mdelay(1);
 }
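
The bus-mastering fix above is purely a width issue: pci_read_config_word() fills exactly 16 bits, so aiming it at a 32-bit local through a cast reads back the wrong half of the word on big-endian hosts. A minimal sketch of the corrected pattern, not part of the patch; the driver uses the literal offset 0x4 and mask 0xFFFB, which correspond to the standard PCI_COMMAND register and its bus-master bit:

/* Sketch only: disable PCI bus mastering with a correctly sized local. */
static void bm_disable_sketch(struct pci_dev *pdev)
{
        u16 cmd;

        pci_read_config_word(pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_MASTER);
}
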
@@ -549,7 +549,7 @@ int rs600_irq_set(struct radeon_device *rdev)
                WREG32(R_000040_GEN_INT_CNTL, 0);
                return -EINVAL;
        }
-       if (rdev->irq.sw_int) {
+       if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
                tmp |= S_000040_SW_INT_EN(1);
        }
        if (rdev->irq.gui_idle) {
@@ -642,7 +642,7 @@ int rs600_irq_process(struct radeon_device *rdev)
        while (status || rdev->irq.stat_regs.r500.disp_int) {
                /* SW interrupt */
                if (G_000044_SW_INT(status)) {
-                       radeon_fence_process(rdev);
+                       radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                }
                /* GUI idle */
                if (G_000040_GUI_IDLE(status)) {
@@ -849,6 +849,12 @@ static int rs600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -858,15 +864,21 @@ static int rs600_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = r600_audio_init(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed initializing audio\n");
                return r;
        }
 
-       r = r600_audio_init(rdev);
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing audio\n");
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
 
@@ -891,11 +903,14 @@ int rs600_resume(struct radeon_device *rdev)
        rv515_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return rs600_startup(rdev);
 }
 
 int rs600_suspend(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        r600_audio_fini(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
@@ -976,7 +991,14 @@ int rs600_init(struct radeon_device *rdev)
        if (r)
                return r;
        rs600_set_safe_registers(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = rs600_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
index a9049ed..4f24a0f 100644 (file)
@@ -621,6 +621,12 @@ static int rs690_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -630,15 +636,21 @@ static int rs690_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = r600_audio_init(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed initializing audio\n");
                return r;
        }
 
-       r = r600_audio_init(rdev);
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing audio\n");
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
 
@@ -663,11 +675,14 @@ int rs690_resume(struct radeon_device *rdev)
        rv515_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return rs690_startup(rdev);
 }
 
 int rs690_suspend(struct radeon_device *rdev)
 {
+       radeon_ib_pool_suspend(rdev);
        r600_audio_fini(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
@@ -749,7 +764,14 @@ int rs690_init(struct radeon_device *rdev)
        if (r)
                return r;
        rs600_set_safe_registers(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = rs690_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
index 6613ee9..880637f 100644 (file)
@@ -55,44 +55,45 @@ void rv515_debugfs(struct radeon_device *rdev)
 
 void rv515_ring_start(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
-       r = radeon_ring_lock(rdev, 64);
+       r = radeon_ring_lock(rdev, ring, 64);
        if (r) {
                return;
        }
-       radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
+       radeon_ring_write(ring,
                          ISYNC_ANY2D_IDLE3D |
                          ISYNC_ANY3D_IDLE2D |
                          ISYNC_WAIT_IDLEGUI |
                          ISYNC_CPSCRATCH_IDLEGUI);
-       radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
-       radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
-       radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
-       radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
-       radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
-       radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
-       radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
-       radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
-       radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
-       radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
-       radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
-       radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
-       radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
-       radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+       radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+       radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+       radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+       radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
+       radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
+       radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+       radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+       radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+       radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+       radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+       radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+       radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+       radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
+       radeon_ring_write(ring,
                          ((6 << MS_X0_SHIFT) |
                           (6 << MS_Y0_SHIFT) |
                           (6 << MS_X1_SHIFT) |
@@ -101,8 +102,8 @@ void rv515_ring_start(struct radeon_device *rdev)
                           (6 << MS_Y2_SHIFT) |
                           (6 << MSBD0_Y_SHIFT) |
                           (6 << MSBD0_X_SHIFT)));
-       radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
-       radeon_ring_write(rdev,
+       radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
+       radeon_ring_write(ring,
                          ((6 << MS_X3_SHIFT) |
                           (6 << MS_Y3_SHIFT) |
                           (6 << MS_X4_SHIFT) |
@@ -110,15 +111,15 @@ void rv515_ring_start(struct radeon_device *rdev)
                           (6 << MS_X5_SHIFT) |
                           (6 << MS_Y5_SHIFT) |
                           (6 << MSBD1_SHIFT)));
-       radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
-       radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
-       radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
-       radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
-       radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
-       radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
-       radeon_ring_write(rdev, PACKET0(0x20C8, 0));
-       radeon_ring_write(rdev, 0);
-       radeon_ring_unlock_commit(rdev);
+       radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
+       radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
+       radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
+       radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
+       radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
+       radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
+       radeon_ring_write(ring, PACKET0(0x20C8, 0));
+       radeon_ring_write(ring, 0);
+       radeon_ring_unlock_commit(rdev, ring);
 }
 
 int rv515_mc_wait_for_idle(struct radeon_device *rdev)
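
rv515_ring_start() shows the mechanical half of the multi-ring conversion: ring state now lives in rdev->ring[], locking is still done through the device, and every write targets an explicit struct radeon_ring. A hedged sketch of the calling convention, not part of the patch, with signatures inferred from their use in the hunks and placeholder packet contents:

/* Sketch only: the per-ring command submission convention. */
static void ring_start_sketch(struct radeon_device *rdev)
{
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;

        r = radeon_ring_lock(rdev, ring, 64);   /* reserve 64 dwords */
        if (r)
                return;
        radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
        radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
        radeon_ring_unlock_commit(rdev, ring);  /* commit and kick the ring */
}
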
@@ -392,6 +393,12 @@ static int rv515_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -401,9 +408,15 @@ static int rv515_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_ib_init(rdev);
+
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r100_ib_test(rdev);
        if (r) {
-               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+               rdev->accel_working = false;
                return r;
        }
        return 0;
@@ -428,6 +441,8 @@ int rv515_resume(struct radeon_device *rdev)
        rv515_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
+
+       rdev->accel_working = true;
        return rv515_startup(rdev);
 }
 
@@ -524,7 +539,14 @@ int rv515_init(struct radeon_device *rdev)
        if (r)
                return r;
        rv515_set_safe_registers(rdev);
+
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = rv515_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
index 23ae1c6..a1668b6 100644 (file)
@@ -357,7 +357,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
 void r700_cp_fini(struct radeon_device *rdev)
 {
        r700_cp_stop(rdev);
-       radeon_ring_fini(rdev);
+       radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 /*
@@ -1043,6 +1043,7 @@ int rv770_mc_init(struct radeon_device *rdev)
 
 static int rv770_startup(struct radeon_device *rdev)
 {
+       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
        /* enable pcie gen2 link */
@@ -1082,6 +1083,12 @@ static int rv770_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -1091,7 +1098,9 @@ static int rv770_startup(struct radeon_device *rdev)
        }
        r600_irq_set(rdev);
 
-       r = radeon_ring_init(rdev, rdev->cp.ring_size);
+       r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+                            R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+                            0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
        r = rv770_cp_load_microcode(rdev);
@@ -1101,6 +1110,17 @@ static int rv770_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_start(rdev);
+       if (r)
+               return r;
+
+       r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "IB test failed (%d).\n", r);
+               rdev->accel_working = false;
+               return r;
+       }
+
        return 0;
 }
 
@@ -1115,18 +1135,13 @@ int rv770_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       rdev->accel_working = true;
        r = rv770_startup(rdev);
        if (r) {
                DRM_ERROR("r600 startup failed on resume\n");
                return r;
        }
 
-       r = r600_ib_test(rdev);
-       if (r) {
-               DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-               return r;
-       }
-
        r = r600_audio_init(rdev);
        if (r) {
                dev_err(rdev->dev, "radeon: audio init failed\n");
@@ -1140,13 +1155,14 @@ int rv770_resume(struct radeon_device *rdev)
 int rv770_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
+       radeon_ib_pool_suspend(rdev);
+       r600_blit_suspend(rdev);
        /* FIXME: we should wait for ring to be empty */
        r700_cp_stop(rdev);
-       rdev->cp.ready = false;
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        rv770_pcie_gart_disable(rdev);
-       r600_blit_suspend(rdev);
 
        return 0;
 }
@@ -1215,8 +1231,8 @@ int rv770_init(struct radeon_device *rdev)
        if (r)
                return r;
 
-       rdev->cp.ring_obj = NULL;
-       r600_ring_init(rdev, 1024 * 1024);
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
@@ -1225,30 +1241,24 @@ int rv770_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = radeon_ib_pool_init(rdev);
        rdev->accel_working = true;
+       if (r) {
+               dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+               rdev->accel_working = false;
+       }
+
        r = rv770_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
+               r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rv770_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
-       if (rdev->accel_working) {
-               r = radeon_ib_pool_init(rdev);
-               if (r) {
-                       dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
-                       rdev->accel_working = false;
-               } else {
-                       r = r600_ib_test(rdev);
-                       if (r) {
-                               dev_err(rdev->dev, "IB test failed (%d).\n", r);
-                               rdev->accel_working = false;
-                       }
-               }
-       }
 
        r = r600_audio_init(rdev);
        if (r) {
@@ -1265,11 +1275,12 @@ void rv770_fini(struct radeon_device *rdev)
        r700_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
-       radeon_ib_pool_fini(rdev);
+       r100_ib_fini(rdev);
        radeon_irq_kms_fini(rdev);
        rv770_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
+       radeon_semaphore_driver_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
index 5468d1c..89afe0b 100644 (file)
@@ -35,6 +35,17 @@ static struct pci_device_id pciidlist[] = {
        savage_PCI_IDS
 };
 
+static const struct file_operations savage_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
@@ -46,17 +57,7 @@ static struct drm_driver driver = {
        .reclaim_buffers = savage_reclaim_buffers,
        .ioctls = savage_ioctls,
        .dma_ioctl = savage_bci_buffers,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = drm_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .llseek = noop_llseek,
-       },
-
+       .fops = &savage_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
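
The savage change above and the sis and tdfx changes below all apply the same refactor: the file_operations table is pulled out of struct drm_driver into a static const object that the driver only points at, so .fops becomes a pointer and the table can live in read-only data. A minimal sketch of the shape, not part of the patch; the callbacks are the stock DRM helpers already named in the hunks:

/* Sketch only: the split-out fops pattern used by savage/sis/tdfx here. */
static const struct file_operations example_driver_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap           = drm_mmap,
        .poll           = drm_poll,
        .fasync         = drm_fasync,
        .llseek         = noop_llseek,
};

static struct drm_driver example_driver = {
        /* ... feature flags, load/unload and ioctl tables ... */
        .fops = &example_driver_fops,           /* was an embedded struct */
};
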
index a9c5716..06da063 100644 (file)
@@ -48,9 +48,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
 
        dev->dev_private = (void *)dev_priv;
        dev_priv->chipset = chipset;
-       ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
-       if (ret)
-               kfree(dev_priv);
+       idr_init(&dev->object_name_idr);
 
        return ret;
 }
@@ -59,32 +57,60 @@ static int sis_driver_unload(struct drm_device *dev)
 {
        drm_sis_private_t *dev_priv = dev->dev_private;
 
-       drm_sman_takedown(&dev_priv->sman);
+       idr_remove_all(&dev_priv->object_idr);
+       idr_destroy(&dev_priv->object_idr);
+
        kfree(dev_priv);
 
        return 0;
 }
 
+static const struct file_operations sis_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .llseek = noop_llseek,
+};
+
+static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+       struct sis_file_private *file_priv;
+
+       DRM_DEBUG_DRIVER("\n");
+       file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+       if (!file_priv)
+               return -ENOMEM;
+
+       file->driver_priv = file_priv;
+
+       INIT_LIST_HEAD(&file_priv->obj_list);
+
+       return 0;
+}
+
+void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+       struct sis_file_private *file_priv = file->driver_priv;
+
+       kfree(file_priv);
+}
+
 static struct drm_driver driver = {
        .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
        .load = sis_driver_load,
        .unload = sis_driver_unload,
+       .open = sis_driver_open,
+       .postclose = sis_driver_postclose,
        .dma_quiescent = sis_idle,
        .reclaim_buffers = NULL,
        .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
        .lastclose = sis_lastclose,
        .ioctls = sis_ioctls,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = drm_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .llseek = noop_llseek,
-       },
-
+       .fops = &sis_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index 194303c..573758b 100644 (file)
@@ -44,7 +44,7 @@ enum sis_family {
        SIS_CHIP_315 = 1,
 };
 
-#include "drm_sman.h"
+#include "drm_mm.h"
 
 
 #define SIS_BASE (dev_priv->mmio)
@@ -54,12 +54,15 @@ enum sis_family {
 typedef struct drm_sis_private {
        drm_local_map_t *mmio;
        unsigned int idle_fault;
-       struct drm_sman sman;
        unsigned int chipset;
        int vram_initialized;
        int agp_initialized;
        unsigned long vram_offset;
        unsigned long agp_offset;
+       struct drm_mm vram_mm;
+       struct drm_mm agp_mm;
+       /** Mapping of userspace keys to mm objects */
+       struct idr object_idr;
 } drm_sis_private_t;
 
 extern int sis_idle(struct drm_device *dev);
index 7fe2b63..dd4a316 100644 (file)
 #define AGP_TYPE 1
 
 
+struct sis_memblock {
+       struct drm_mm_node mm_node;
+       struct sis_memreq req;
+       struct list_head owner_list;
+};
+
 #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
 /* fb management via fb device */
 
 #define SIS_MM_ALIGN_SHIFT 0
 #define SIS_MM_ALIGN_MASK 0
 
-static void *sis_sman_mm_allocate(void *private, unsigned long size,
-                                 unsigned alignment)
-{
-       struct sis_memreq req;
-
-       req.size = size;
-       sis_malloc(&req);
-       if (req.size == 0)
-               return NULL;
-       else
-               return (void *)(unsigned long)~req.offset;
-}
-
-static void sis_sman_mm_free(void *private, void *ref)
-{
-       sis_free(~((unsigned long)ref));
-}
-
-static void sis_sman_mm_destroy(void *private)
-{
-       ;
-}
-
-static unsigned long sis_sman_mm_offset(void *private, void *ref)
-{
-       return ~((unsigned long)ref);
-}
-
 #else /* CONFIG_FB_SIS[_MODULE] */
 
 #define SIS_MM_ALIGN_SHIFT 4
@@ -86,30 +64,11 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
 {
        drm_sis_private_t *dev_priv = dev->dev_private;
        drm_sis_fb_t *fb = data;
-       int ret;
 
        mutex_lock(&dev->struct_mutex);
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
-       {
-               struct drm_sman_mm sman_mm;
-               sman_mm.private = (void *)0xFFFFFFFF;
-               sman_mm.allocate = sis_sman_mm_allocate;
-               sman_mm.free = sis_sman_mm_free;
-               sman_mm.destroy = sis_sman_mm_destroy;
-               sman_mm.offset = sis_sman_mm_offset;
-               ret =
-                   drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
-       }
-#else
-       ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
-                                fb->size >> SIS_MM_ALIGN_SHIFT);
-#endif
-
-       if (ret) {
-               DRM_ERROR("VRAM memory manager initialisation error\n");
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       /* Unconditionally init the drm_mm, even though we don't use it when the
+        * fb sis driver is available - this makes cleanup easier. */
+       drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> SIS_MM_ALIGN_SHIFT);
 
        dev_priv->vram_initialized = 1;
        dev_priv->vram_offset = fb->offset;
@@ -120,13 +79,15 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
        return 0;
 }
 
-static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
                         void *data, int pool)
 {
        drm_sis_private_t *dev_priv = dev->dev_private;
        drm_sis_mem_t *mem = data;
-       int retval = 0;
-       struct drm_memblock_item *item;
+       int retval = 0, user_key;
+       struct sis_memblock *item;
+       struct sis_file_private *file_priv = file->driver_priv;
+       unsigned long offset;
 
        mutex_lock(&dev->struct_mutex);
 
@@ -138,25 +99,68 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
                return -EINVAL;
        }
 
-       mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
-       item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
-                             (unsigned long)file_priv);
+       item = kzalloc(sizeof(*item), GFP_KERNEL);
+       if (!item) {
+               retval = -ENOMEM;
+               goto fail_alloc;
+       }
 
-       mutex_unlock(&dev->struct_mutex);
-       if (item) {
-               mem->offset = ((pool == 0) ?
-                             dev_priv->vram_offset : dev_priv->agp_offset) +
-                   (item->mm->
-                    offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
-               mem->free = item->user_hash.key;
-               mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+       mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
+       if (pool == AGP_TYPE) {
+               retval = drm_mm_insert_node(&dev_priv->agp_mm,
+                                           &item->mm_node,
+                                           mem->size, 0);
+               offset = item->mm_node.start;
        } else {
-               mem->offset = 0;
-               mem->size = 0;
-               mem->free = 0;
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+               item->req.size = mem->size;
+               sis_malloc(&item->req);
+               if (item->req.size == 0)
+                       retval = -ENOMEM;
+               offset = item->req.offset;
+#else
+               retval = drm_mm_insert_node(&dev_priv->vram_mm,
+                                           &item->mm_node,
+                                           mem->size, 0);
+               offset = item->mm_node.start;
+#endif
+       }
+       if (retval)
+               goto fail_alloc;
+
+again:
+       if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
                retval = -ENOMEM;
+               goto fail_idr;
        }
 
+       retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+       if (retval == -EAGAIN)
+               goto again;
+       if (retval)
+               goto fail_idr;
+
+       list_add(&item->owner_list, &file_priv->obj_list);
+       mutex_unlock(&dev->struct_mutex);
+
+       mem->offset = ((pool == 0) ?
+                     dev_priv->vram_offset : dev_priv->agp_offset) +
+           (offset << SIS_MM_ALIGN_SHIFT);
+       mem->free = user_key;
+       mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+
+       return 0;
+
+fail_idr:
+       drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+       kfree(item);
+       mutex_unlock(&dev->struct_mutex);
+
+       mem->offset = 0;
+       mem->size = 0;
+       mem->free = 0;
+
        DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
                  mem->offset);
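
The new allocation path hides the drm_mm node behind an idr key that is handed back to userspace in mem->free and later looked up in sis_drm_free(). The idr_pre_get()/idr_get_new_above() pair used here is the old (pre-3.9) idiom and must be retried when the preallocated node is consumed by a concurrent allocation; a condensed sketch of just that loop, not part of the patch, with locking and the memory-manager insert elided:

/* Sketch only: the retried idr id allocation used above. */
static int object_key_alloc_sketch(struct idr *idr, void *item, int *user_key)
{
        int ret;

again:
        if (idr_pre_get(idr, GFP_KERNEL) == 0)
                return -ENOMEM;                 /* could not preallocate */

        ret = idr_get_new_above(idr, item, 1, user_key);
        if (ret == -EAGAIN)
                goto again;                     /* preallocation raced away */
        return ret;                             /* 0 on success, *user_key set */
}
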
 
@@ -167,14 +171,28 @@ static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *fil
 {
        drm_sis_private_t *dev_priv = dev->dev_private;
        drm_sis_mem_t *mem = data;
-       int ret;
+       struct sis_memblock *obj;
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_sman_free_key(&dev_priv->sman, mem->free);
+       obj = idr_find(&dev_priv->object_idr, mem->free);
+       if (obj == NULL) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       idr_remove(&dev_priv->object_idr, mem->free);
+       list_del(&obj->owner_list);
+       if (drm_mm_node_allocated(&obj->mm_node))
+               drm_mm_remove_node(&obj->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+       else
+               sis_free(obj->req.offset);
+#endif
+       kfree(obj);
        mutex_unlock(&dev->struct_mutex);
        DRM_DEBUG("free = 0x%lx\n", mem->free);
 
-       return ret;
+       return 0;
 }
 
 static int sis_fb_alloc(struct drm_device *dev, void *data,
@@ -188,18 +206,10 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
 {
        drm_sis_private_t *dev_priv = dev->dev_private;
        drm_sis_agp_t *agp = data;
-       int ret;
        dev_priv = dev->dev_private;
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
-                                agp->size >> SIS_MM_ALIGN_SHIFT);
-
-       if (ret) {
-               DRM_ERROR("AGP memory manager initialisation error\n");
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> SIS_MM_ALIGN_SHIFT);
 
        dev_priv->agp_initialized = 1;
        dev_priv->agp_offset = agp->offset;
@@ -293,20 +303,26 @@ void sis_lastclose(struct drm_device *dev)
                return;
 
        mutex_lock(&dev->struct_mutex);
-       drm_sman_cleanup(&dev_priv->sman);
-       dev_priv->vram_initialized = 0;
-       dev_priv->agp_initialized = 0;
+       if (dev_priv->vram_initialized) {
+               drm_mm_takedown(&dev_priv->vram_mm);
+               dev_priv->vram_initialized = 0;
+       }
+       if (dev_priv->agp_initialized) {
+               drm_mm_takedown(&dev_priv->agp_mm);
+               dev_priv->agp_initialized = 0;
+       }
        dev_priv->mmio = NULL;
        mutex_unlock(&dev->struct_mutex);
 }
 
 void sis_reclaim_buffers_locked(struct drm_device *dev,
-                               struct drm_file *file_priv)
+                               struct drm_file *file)
 {
-       drm_sis_private_t *dev_priv = dev->dev_private;
+       struct sis_file_private *file_priv = file->driver_priv;
+       struct sis_memblock *entry, *next;
 
        mutex_lock(&dev->struct_mutex);
-       if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+       if (list_empty(&file_priv->obj_list)) {
                mutex_unlock(&dev->struct_mutex);
                return;
        }
@@ -314,7 +330,18 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
        if (dev->driver->dma_quiescent)
                dev->driver->dma_quiescent(dev);
 
-       drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+
+       list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+                                owner_list) {
+               list_del(&entry->owner_list);
+               if (drm_mm_node_allocated(&entry->mm_node))
+                       drm_mm_remove_node(&entry->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+               else
+                       sis_free(entry->req.offset);
+#endif
+               kfree(entry);
+       }
        mutex_unlock(&dev->struct_mutex);
        return;
 }
index cda2991..1613c78 100644 (file)
@@ -41,20 +41,21 @@ static struct pci_device_id pciidlist[] = {
        tdfx_PCI_IDS
 };
 
+static const struct file_operations tdfx_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features = DRIVER_USE_MTRR,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = drm_ioctl,
-                .mmap = drm_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .llseek = noop_llseek,
-       },
-
+       .fops = &tdfx_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index f3cf6f0..b2b33dd 100644 (file)
@@ -7,4 +7,8 @@ ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
        ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
        ttm_bo_manager.o
 
+ifeq ($(CONFIG_SWIOTLB),y)
+ttm-y += ttm_page_alloc_dma.o
+endif
+
 obj-$(CONFIG_DRM_TTM) += ttm.o
index 1c4a72f..747c141 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
 #ifdef TTM_HAS_AGP
 #include "ttm/ttm_placement.h"
 #include <linux/agp_backend.h>
 #include <asm/agp.h>
 
 struct ttm_agp_backend {
-       struct ttm_backend backend;
+       struct ttm_tt ttm;
        struct agp_memory *mem;
        struct agp_bridge_data *bridge;
 };
 
-static int ttm_agp_populate(struct ttm_backend *backend,
-                           unsigned long num_pages, struct page **pages,
-                           struct page *dummy_read_page,
-                           dma_addr_t *dma_addrs)
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
-       struct ttm_agp_backend *agp_be =
-           container_of(backend, struct ttm_agp_backend, backend);
-       struct page **cur_page, **last_page = pages + num_pages;
+       struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+       struct drm_mm_node *node = bo_mem->mm_node;
        struct agp_memory *mem;
+       int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+       unsigned i;
 
-       mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
+       mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
        if (unlikely(mem == NULL))
                return -ENOMEM;
 
        mem->page_count = 0;
-       for (cur_page = pages; cur_page < last_page; ++cur_page) {
-               struct page *page = *cur_page;
+       for (i = 0; i < ttm->num_pages; i++) {
+               struct page *page = ttm->pages[i];
+
                if (!page)
-                       page = dummy_read_page;
+                       page = ttm->dummy_read_page;
 
                mem->pages[mem->page_count++] = page;
        }
        agp_be->mem = mem;
-       return 0;
-}
-
-static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
-       struct ttm_agp_backend *agp_be =
-           container_of(backend, struct ttm_agp_backend, backend);
-       struct drm_mm_node *node = bo_mem->mm_node;
-       struct agp_memory *mem = agp_be->mem;
-       int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
-       int ret;
 
        mem->is_flushed = 1;
        mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
@@ -90,50 +79,39 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
        return ret;
 }
 
-static int ttm_agp_unbind(struct ttm_backend *backend)
+static int ttm_agp_unbind(struct ttm_tt *ttm)
 {
-       struct ttm_agp_backend *agp_be =
-           container_of(backend, struct ttm_agp_backend, backend);
+       struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
 
-       if (agp_be->mem->is_bound)
-               return agp_unbind_memory(agp_be->mem);
-       else
-               return 0;
-}
-
-static void ttm_agp_clear(struct ttm_backend *backend)
-{
-       struct ttm_agp_backend *agp_be =
-           container_of(backend, struct ttm_agp_backend, backend);
-       struct agp_memory *mem = agp_be->mem;
-
-       if (mem) {
-               ttm_agp_unbind(backend);
-               agp_free_memory(mem);
+       if (agp_be->mem) {
+               if (agp_be->mem->is_bound)
+                       return agp_unbind_memory(agp_be->mem);
+               agp_free_memory(agp_be->mem);
+               agp_be->mem = NULL;
        }
-       agp_be->mem = NULL;
+       return 0;
 }
 
-static void ttm_agp_destroy(struct ttm_backend *backend)
+static void ttm_agp_destroy(struct ttm_tt *ttm)
 {
-       struct ttm_agp_backend *agp_be =
-           container_of(backend, struct ttm_agp_backend, backend);
+       struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
 
        if (agp_be->mem)
-               ttm_agp_clear(backend);
+               ttm_agp_unbind(ttm);
+       ttm_tt_fini(ttm);
        kfree(agp_be);
 }
 
 static struct ttm_backend_func ttm_agp_func = {
-       .populate = ttm_agp_populate,
-       .clear = ttm_agp_clear,
        .bind = ttm_agp_bind,
        .unbind = ttm_agp_unbind,
        .destroy = ttm_agp_destroy,
 };
 
-struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
-                                        struct agp_bridge_data *bridge)
+struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+                                struct agp_bridge_data *bridge,
+                                unsigned long size, uint32_t page_flags,
+                                struct page *dummy_read_page)
 {
        struct ttm_agp_backend *agp_be;
 
@@ -143,10 +121,29 @@ struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
 
        agp_be->mem = NULL;
        agp_be->bridge = bridge;
-       agp_be->backend.func = &ttm_agp_func;
-       agp_be->backend.bdev = bdev;
-       return &agp_be->backend;
+       agp_be->ttm.func = &ttm_agp_func;
+
+       if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+               return NULL;
+       }
+
+       return &agp_be->ttm;
+}
+EXPORT_SYMBOL(ttm_agp_tt_create);
+
+int ttm_agp_tt_populate(struct ttm_tt *ttm)
+{
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       return ttm_pool_populate(ttm);
+}
+EXPORT_SYMBOL(ttm_agp_tt_populate);
+
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
+{
+       ttm_pool_unpopulate(ttm);
 }
-EXPORT_SYMBOL(ttm_agp_backend_init);
+EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
 
 #endif
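
The AGP backend rework above folds populate/clear into the TTM core: the driver object now embeds a struct ttm_tt, the func table shrinks to bind/unbind/destroy, and page allocation goes through thin wrappers around the shared page pool. A condensed sketch of the resulting shape, not part of the patch; all identifiers appear in the hunk and the function bodies are the ones shown above:

/* Sketch only: the reworked AGP backend layout. */
struct ttm_agp_backend_sketch {
        struct ttm_tt ttm;                      /* embedded; container_of() recovers the backend */
        struct agp_memory *mem;
        struct agp_bridge_data *bridge;
};

static struct ttm_backend_func ttm_agp_func_sketch = {
        .bind    = ttm_agp_bind,                /* agp_allocate_memory + bind */
        .unbind  = ttm_agp_unbind,              /* also frees the agp_memory */
        .destroy = ttm_agp_destroy,             /* unbind, ttm_tt_fini, kfree */
};

int ttm_agp_tt_populate_sketch(struct ttm_tt *ttm)
{
        if (ttm->state != tt_unpopulated)
                return 0;                       /* already backed by pages */
        return ttm_pool_populate(ttm);          /* shared page-pool allocator */
}
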
index 0bb0f5f..2f0eab6 100644 (file)
@@ -137,6 +137,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;
+       size_t acc_size = bo->acc_size;
 
        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
@@ -152,9 +153,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
        if (bo->destroy)
                bo->destroy(bo);
        else {
-               ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                kfree(bo);
        }
+       ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 
 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
@@ -337,27 +338,11 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
        case ttm_bo_type_kernel:
-               bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-                                       page_flags, glob->dummy_read_page);
+               bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+                                                     page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
-       case ttm_bo_type_user:
-               bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-                                       page_flags | TTM_PAGE_FLAG_USER,
-                                       glob->dummy_read_page);
-               if (unlikely(bo->ttm == NULL)) {
-                       ret = -ENOMEM;
-                       break;
-               }
-
-               ret = ttm_tt_set_user(bo->ttm, current,
-                                     bo->buffer_start, bo->num_pages);
-               if (unlikely(ret != 0)) {
-                       ttm_tt_destroy(bo->ttm);
-                       bo->ttm = NULL;
-               }
-               break;
        default:
                printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
                ret = -EINVAL;
@@ -419,9 +404,6 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                }
        }
 
-       if (bdev->driver->move_notify)
-               bdev->driver->move_notify(bo, mem);
-
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
@@ -434,6 +416,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
        if (ret)
                goto out_err;
 
+       if (bdev->driver->move_notify)
+               bdev->driver->move_notify(bo, mem);
+
 moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
@@ -472,6 +457,9 @@ out_err:
 
 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 {
+       if (bo->bdev->driver->move_notify)
+               bo->bdev->driver->move_notify(bo, NULL);
+
        if (bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
@@ -913,16 +901,12 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 }
 
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
-                                bool disallow_fixed,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
 {
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
-       if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
-               return false;
-
        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;
 
@@ -967,7 +951,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                man = &bdev->man[mem_type];
 
                type_ok = ttm_bo_mt_compatible(man,
-                                               bo->type == ttm_bo_type_user,
                                                mem_type,
                                                placement->placement[i],
                                                &cur_flags);
@@ -1015,7 +998,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                if (!man->has_type)
                        continue;
                if (!ttm_bo_mt_compatible(man,
-                                               bo->type == ttm_bo_type_user,
                                                mem_type,
                                                placement->busy_placement[i],
                                                &cur_flags))
@@ -1185,6 +1167,17 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 {
        int ret = 0;
        unsigned long num_pages;
+       struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+
+       ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+       if (ret) {
+               printk(KERN_ERR TTM_PFX "Out of kernel memory.\n");
+               if (destroy)
+                       (*destroy)(bo);
+               else
+                       kfree(bo);
+               return -ENOMEM;
+       }
 
        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1255,14 +1248,34 @@ out_err:
 }
 EXPORT_SYMBOL(ttm_bo_init);
 
-static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
-                                unsigned long num_pages)
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+                      unsigned long bo_size,
+                      unsigned struct_size)
 {
-       size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
-           PAGE_MASK;
+       unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+       size_t size = 0;
 
-       return glob->ttm_bo_size + 2 * page_array_size;
+       size += ttm_round_pot(struct_size);
+       size += PAGE_ALIGN(npages * sizeof(void *));
+       size += ttm_round_pot(sizeof(struct ttm_tt));
+       return size;
 }
+EXPORT_SYMBOL(ttm_bo_acc_size);
+
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+                          unsigned long bo_size,
+                          unsigned struct_size)
+{
+       unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+       size_t size = 0;
+
+       size += ttm_round_pot(struct_size);
+       size += PAGE_ALIGN(npages * sizeof(void *));
+       size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+       size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+       return size;
+}
+EXPORT_SYMBOL(ttm_bo_dma_acc_size);
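
These two helpers replace the old glob->ttm_bo_size bookkeeping: a driver now asks up front how much kernel memory one buffer object will pin, and ttm_bo_init() charges that amount against ttm_mem_global before doing anything else, freeing the caller's object itself on failure. As a rough worked example with 4 KiB pages and 8-byte pointers, a 1 MiB buffer is 256 pages, so the page-pointer array rounds up to one page; the DMA variant adds a second page-aligned array of dma_addr_t plus the larger ttm_dma_tt struct. A hedged sketch of driver-side use, where struct my_bo is a hypothetical wrapper object:

/* Sketch only: precomputing the accounting size for a driver BO type. */
struct my_bo {
        struct ttm_buffer_object tbo;           /* hypothetical wrapper */
        /* driver-private fields ... */
};

static size_t my_bo_acc_size_sketch(struct ttm_bo_device *bdev,
                                    unsigned long size)
{
        /* Passed through ttm_bo_init()'s acc_size argument; use
         * ttm_bo_dma_acc_size() instead when the backend is a ttm_dma_tt. */
        return ttm_bo_acc_size(bdev, size, sizeof(struct my_bo));
}
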
 
 int ttm_bo_create(struct ttm_bo_device *bdev,
                        unsigned long size,
@@ -1276,10 +1289,10 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 {
        struct ttm_buffer_object *bo;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+       size_t acc_size;
        int ret;
 
-       size_t acc_size =
-           ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0))
                return ret;
@@ -1465,13 +1478,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
                goto out_no_shrink;
        }
 
-       glob->ttm_bo_extra_size =
-               ttm_round_pot(sizeof(struct ttm_tt)) +
-               ttm_round_pot(sizeof(struct ttm_backend));
-
-       glob->ttm_bo_size = glob->ttm_bo_extra_size +
-               ttm_round_pot(sizeof(struct ttm_buffer_object));
-
        atomic_set(&glob->bo_count, 0);
 
        ret = kobject_init_and_add(
index 082fcae..f8187ea 100644 (file)
@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
 {
-       struct page *d = ttm_tt_get_page(ttm, page);
+       struct page *d = ttm->pages[page];
        void *dst;
 
        if (!d)
@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
 {
-       struct page *s = ttm_tt_get_page(ttm, page);
+       struct page *s = ttm->pages[page];
        void *src;
 
        if (!s)
@@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
        if (old_iomap == NULL && ttm == NULL)
                goto out2;
 
+       if (ttm->state == tt_unpopulated) {
+               ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+               if (ret)
+                       goto out1;
+       }
+
        add = 0;
        dir = 1;
 
@@ -439,6 +445,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
+       fbo->acc_size = 0;
 
        *new_obj = fbo;
        return 0;
@@ -502,10 +509,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 {
        struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
-       struct page *d;
-       int i;
+       int ret;
 
        BUG_ON(!ttm);
+
+       if (ttm->state == tt_unpopulated) {
+               ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+               if (ret)
+                       return ret;
+       }
+
        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
@@ -513,18 +526,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                 */
 
                map->bo_kmap_type = ttm_bo_map_kmap;
-               map->page = ttm_tt_get_page(ttm, start_page);
+               map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
-           /*
-            * Populate the part we're mapping;
-            */
-               for (i = start_page; i < start_page + num_pages; ++i) {
-                       d = ttm_tt_get_page(ttm, i);
-                       if (!d)
-                               return -ENOMEM;
-               }
-
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
index 221b924..5441284 100644 (file)
@@ -174,18 +174,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    vm_get_page_prot(vma->vm_flags) :
                    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+
+               /* Allocate all pages at once, the most common usage */
+               if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+                       retval = VM_FAULT_OOM;
+                       goto out_io_unlock;
+               }
        }
 
        /*
         * Speculatively prefault a number of pages. Only error on
         * first page.
         */
-
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                if (bo->mem.bus.is_iomem)
                        pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
                else {
-                       page = ttm_tt_get_page(ttm, page_offset);
+                       page = ttm->pages[page_offset];
                        if (unlikely(!page && i == 0)) {
                                retval = VM_FAULT_OOM;
                                goto out_io_unlock;
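
The fault-handler change pairs with the populate/unpopulate split: instead of allocating pages one at a time via ttm_tt_get_page(), the first fault populates the whole ttm through the driver callback and the speculative prefault loop then simply indexes ttm->pages[]. A condensed sketch of that flow, not part of the patch, with identifiers from the hunk and the locking, iomem and pfn-insertion details elided:

/* Sketch only: populate once, then prefault from the page array. */
static int bo_vm_fault_sketch(struct ttm_tt *ttm, unsigned long page_offset)
{
        struct page *page;
        int i;

        if (ttm->state == tt_unpopulated &&
            ttm->bdev->driver->ttm_tt_populate(ttm))
                return VM_FAULT_OOM;            /* allocate all pages at once */

        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                page = ttm->pages[page_offset + i];
                if (unlikely(!page && i == 0))
                        return VM_FAULT_OOM;    /* only the first page is fatal */
                /* ... vm_insert of the pfn elided ... */
        }
        return VM_FAULT_NOPAGE;
}
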
index e70ddd8..9eba8e9 100644 (file)
@@ -395,6 +395,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
                       zone->name, (unsigned long long) zone->max_mem >> 10);
        }
        ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+       ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
        return 0;
 out_no_zone:
        ttm_mem_global_release(glob);
@@ -409,6 +410,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 
        /* let the page allocator first stop the shrink work. */
        ttm_page_alloc_fini();
+       ttm_dma_page_alloc_fini();
 
        flush_workqueue(glob->swap_queue);
        destroy_workqueue(glob->swap_queue);
index 727e93d..499debd 100644 (file)
@@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
  * @return count of pages still required to fulfill the request.
  */
 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
-               struct list_head *pages, int ttm_flags,
-               enum ttm_caching_state cstate, unsigned count)
+                                       struct list_head *pages,
+                                       int ttm_flags,
+                                       enum ttm_caching_state cstate,
+                                       unsigned count)
 {
        unsigned long irq_flags;
        struct list_head *p;
@@ -660,17 +662,67 @@ out:
        return count;
 }
 
+/* Put all pages in pages list to correct pool to wait for reuse */
+static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+                         enum ttm_caching_state cstate)
+{
+       unsigned long irq_flags;
+       struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+       unsigned i;
+
+       if (pool == NULL) {
+               /* No pool for this memory type so free the pages */
+               for (i = 0; i < npages; i++) {
+                       if (pages[i]) {
+                               if (page_count(pages[i]) != 1)
+                                       printk(KERN_ERR TTM_PFX
+                                              "Erroneous page count. "
+                                              "Leaking pages.\n");
+                               __free_page(pages[i]);
+                               pages[i] = NULL;
+                       }
+               }
+               return;
+       }
+
+       spin_lock_irqsave(&pool->lock, irq_flags);
+       for (i = 0; i < npages; i++) {
+               if (pages[i]) {
+                       if (page_count(pages[i]) != 1)
+                               printk(KERN_ERR TTM_PFX
+                                      "Erroneous page count. "
+                                      "Leaking pages.\n");
+                       list_add_tail(&pages[i]->lru, &pool->list);
+                       pages[i] = NULL;
+                       pool->npages++;
+               }
+       }
+       /* Check that we don't go over the pool limit */
+       npages = 0;
+       if (pool->npages > _manager->options.max_size) {
+               npages = pool->npages - _manager->options.max_size;
+               /* free at least NUM_PAGES_TO_ALLOC number of pages
+                * to reduce calls to set_memory_wb */
+               if (npages < NUM_PAGES_TO_ALLOC)
+                       npages = NUM_PAGES_TO_ALLOC;
+       }
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+       if (npages)
+               ttm_page_pool_free(pool, npages);
+}
+
 /*
  * On success pages list will hold count number of correctly
  * cached pages.
  */
-int ttm_get_pages(struct list_head *pages, int flags,
-                 enum ttm_caching_state cstate, unsigned count,
-                 dma_addr_t *dma_address)
+static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+                        enum ttm_caching_state cstate)
 {
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+       struct list_head plist;
        struct page *p = NULL;
        gfp_t gfp_flags = GFP_USER;
+       unsigned count;
        int r;
 
        /* set zero flag for page allocation if required */
@@ -684,7 +736,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
                else
                        gfp_flags |= GFP_HIGHUSER;
 
-               for (r = 0; r < count; ++r) {
+               for (r = 0; r < npages; ++r) {
                        p = alloc_page(gfp_flags);
                        if (!p) {
 
@@ -693,87 +745,53 @@ int ttm_get_pages(struct list_head *pages, int flags,
                                return -ENOMEM;
                        }
 
-                       list_add(&p->lru, pages);
+                       pages[r] = p;
                }
                return 0;
        }
 
-
        /* combine zero flag to pool flags */
        gfp_flags |= pool->gfp_flags;
 
        /* First we take pages from the pool */
-       count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+       INIT_LIST_HEAD(&plist);
+       npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+       count = 0;
+       list_for_each_entry(p, &plist, lru) {
+               pages[count++] = p;
+       }
 
        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-               list_for_each_entry(p, pages, lru) {
+               list_for_each_entry(p, &plist, lru) {
                        clear_page(page_address(p));
                }
        }
 
        /* If pool didn't have enough pages allocate new one. */
-       if (count > 0) {
+       if (npages > 0) {
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 **/
-               r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+               INIT_LIST_HEAD(&plist);
+               r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
+               list_for_each_entry(p, &plist, lru) {
+                       pages[count++] = p;
+               }
                if (r) {
                        /* If there is any pages in the list put them back to
                         * the pool. */
                        printk(KERN_ERR TTM_PFX
                               "Failed to allocate extra pages "
                               "for large request.");
-                       ttm_put_pages(pages, 0, flags, cstate, NULL);
+                       ttm_put_pages(pages, count, flags, cstate);
                        return r;
                }
        }
 
-
        return 0;
 }
 
-/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
-                  enum ttm_caching_state cstate, dma_addr_t *dma_address)
-{
-       unsigned long irq_flags;
-       struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
-       struct page *p, *tmp;
-
-       if (pool == NULL) {
-               /* No pool for this memory type so free the pages */
-
-               list_for_each_entry_safe(p, tmp, pages, lru) {
-                       __free_page(p);
-               }
-               /* Make the pages list empty */
-               INIT_LIST_HEAD(pages);
-               return;
-       }
-       if (page_count == 0) {
-               list_for_each_entry_safe(p, tmp, pages, lru) {
-                       ++page_count;
-               }
-       }
-
-       spin_lock_irqsave(&pool->lock, irq_flags);
-       list_splice_init(pages, &pool->list);
-       pool->npages += page_count;
-       /* Check that we don't go over the pool limit */
-       page_count = 0;
-       if (pool->npages > _manager->options.max_size) {
-               page_count = pool->npages - _manager->options.max_size;
-               /* free at least NUM_PAGES_TO_ALLOC number of pages
-                * to reduce calls to set_memory_wb */
-               if (page_count < NUM_PAGES_TO_ALLOC)
-                       page_count = NUM_PAGES_TO_ALLOC;
-       }
-       spin_unlock_irqrestore(&pool->lock, irq_flags);
-       if (page_count)
-               ttm_page_pool_free(pool, page_count);
-}
-
 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
                char *name)
 {
@@ -836,6 +854,62 @@ void ttm_page_alloc_fini(void)
        _manager = NULL;
 }
 
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+       struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+       unsigned i;
+       int ret;
+
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               ret = ttm_get_pages(&ttm->pages[i], 1,
+                                   ttm->page_flags,
+                                   ttm->caching_state);
+               if (ret != 0) {
+                       ttm_pool_unpopulate(ttm);
+                       return -ENOMEM;
+               }
+
+               ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+                                               false, false);
+               if (unlikely(ret != 0)) {
+                       ttm_pool_unpopulate(ttm);
+                       return -ENOMEM;
+               }
+       }
+
+       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+               ret = ttm_tt_swapin(ttm);
+               if (unlikely(ret != 0)) {
+                       ttm_pool_unpopulate(ttm);
+                       return ret;
+               }
+       }
+
+       ttm->state = tt_unbound;
+       return 0;
+}
+EXPORT_SYMBOL(ttm_pool_populate);
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+       unsigned i;
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               if (ttm->pages[i]) {
+                       ttm_mem_global_free_page(ttm->glob->mem_glob,
+                                                ttm->pages[i]);
+                       ttm_put_pages(&ttm->pages[i], 1,
+                                     ttm->page_flags,
+                                     ttm->caching_state);
+               }
+       }
+       ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL(ttm_pool_unpopulate);
+
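
With ttm_tt_populate() gone from the core, each driver's ttm_tt_populate/ttm_tt_unpopulate callbacks are expected to call into a pool allocator themselves. Below is a minimal sketch of what such callbacks might look like when backed by this page pool; the example_* names are hypothetical and not part of the patch.

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

/* Hypothetical driver callbacks that simply forward to the generic pool. */
static int example_ttm_tt_populate(struct ttm_tt *ttm)
{
        /* Fills ttm->pages[] and accounts the memory against the global zones. */
        return ttm_pool_populate(ttm);
}

static void example_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        /* Releases the accounting and returns the pages to the pool. */
        ttm_pool_unpopulate(ttm);
}

/* Wired into the driver's bo_driver so ttm_tt_bind() and the fault handler can
 * reach them through bdev->driver->ttm_tt_populate(). */
static struct ttm_bo_driver example_bo_driver = {
        .ttm_tt_populate   = example_ttm_tt_populate,
        .ttm_tt_unpopulate = example_ttm_tt_unpopulate,
        /* remaining driver hooks omitted in this sketch */
};
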
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 {
        struct ttm_page_pool *p;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
new file mode 100644 (file)
index 0000000..37ead69
--- /dev/null
@@ -0,0 +1,1143 @@
+/*
+ * Copyright 2011 (c) Oracle Corp.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ */
+
+/*
+ * A simple DMA pool loosely based on dmapool.c. It has certain advantages
+ * over the DMA pools:
+ * - Pool collects recently freed pages for reuse (and hooks up to
+ *   the shrinker).
+ * - Tracks currently in use pages
+ * - Tracks whether the page is UC, WB or cached (and reverts to WB
+ *   when freed).
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION               4
+#define FREE_ALL_PAGES                 (~0U)
+/* times are in msecs */
+#define IS_UNDEFINED                   (0)
+#define IS_WC                          (1<<1)
+#define IS_UC                          (1<<2)
+#define IS_CACHED                      (1<<3)
+#define IS_DMA32                       (1<<4)
+
+enum pool_type {
+       POOL_IS_UNDEFINED,
+       POOL_IS_WC = IS_WC,
+       POOL_IS_UC = IS_UC,
+       POOL_IS_CACHED = IS_CACHED,
+       POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
+       POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
+       POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+};
+/*
+ * The pool structure. There are usually six pools:
+ *  - generic (not restricted to DMA32):
+ *      - write combined, uncached, cached.
+ *  - dma32 (up to 2^32 - so up to 4GB):
+ *      - write combined, uncached, cached.
+ * for each 'struct device'. The 'cached' is for pages that are actively used.
+ * The other ones can be shrunk by the shrinker API if necessary.
+ * @pools: The 'struct device->dma_pools' link.
+ * @type: Type of the pool.
+ * @lock: Protects the inuse_list and free_list from concurrent access. Must be
+ * used with irqsave/irqrestore variants because the pool allocator may be
+ * called from delayed work.
+ * @inuse_list: Pool of pages that are in use. The order is important: it
+ *   matches the order in which the TTM pages were put back.
+ * @free_list: Pool of pages that are free to be used. No order requirements.
+ * @dev: The device that is associated with these pools.
+ * @size: Size used during DMA allocation.
+ * @npages_free: Count of available pages for re-use.
+ * @npages_in_use: Count of pages that are in use.
+ * @nfrees: Stats when the pool is shrinking.
+ * @nrefills: Stats when the pool is grown.
+ * @gfp_flags: Flags to pass to alloc_page.
+ * @name: Name of the pool.
+ * @dev_name: Name derived from dev - similar to how dev_info works.
+ *   Used during shutdown as the dev_info during release is unavailable.
+ */
+struct dma_pool {
+       struct list_head pools; /* The 'struct device->dma_pools link */
+       enum pool_type type;
+       spinlock_t lock;
+       struct list_head inuse_list;
+       struct list_head free_list;
+       struct device *dev;
+       unsigned size;
+       unsigned npages_free;
+       unsigned npages_in_use;
+       unsigned long nfrees; /* Stats when shrunk. */
+       unsigned long nrefills; /* Stats when grown. */
+       gfp_t gfp_flags;
+       char name[13]; /* "cached dma32" */
+       char dev_name[64]; /* Constructed from dev */
+};
+
+/*
+ * The accounting structure that keeps track of the allocated page along
+ * with its DMA address.
+ * @page_list: The link to the 'page_list' in 'struct dma_pool'.
+ * @vaddr: The virtual address of the page.
+ * @p: The 'struct page' backing the allocation.
+ * @dma: The bus address of the page. If the page is not allocated
+ *   via the DMA API, it will be -1.
+ */
+struct dma_page {
+       struct list_head page_list;
+       void *vaddr;
+       struct page *p;
+       dma_addr_t dma;
+};
+
+/*
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysfs store. They won't have an immediate
+ * effect anyway, so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+       unsigned        alloc_size;
+       unsigned        max_size;
+       unsigned        small;
+};
+
+/*
+ * Contains the list of all of the 'struct device' and their corresponding
+ * DMA pools. Guarded by _mutex->lock.
+ * @pools: The link to 'struct ttm_pool_manager->pools'
+ * @dev: The 'struct device' associated with the 'pool'
+ * @pool: The 'struct dma_pool' associated with the 'dev'
+ */
+struct device_pools {
+       struct list_head pools;
+       struct device *dev;
+       struct dma_pool *pool;
+};
+
+/*
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * @lock: Lock used when adding/removing from pools
+ * @pools: List of 'struct device' and 'struct dma_pool' tuples.
+ * @options: Limits for the pool.
+ * @npools: Total amount of pools in existence.
+ * @shrinker: The structure used by [un|]register_shrinker
+ */
+struct ttm_pool_manager {
+       struct mutex            lock;
+       struct list_head        pools;
+       struct ttm_pool_opts    options;
+       unsigned                npools;
+       struct shrinker         mm_shrink;
+       struct kobject          kobj;
+};
+
+static struct ttm_pool_manager *_manager;
+
+static struct attribute ttm_page_pool_max = {
+       .name = "pool_max_size",
+       .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+       .name = "pool_small_allocation",
+       .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+       .name = "pool_allocation_size",
+       .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+       &ttm_page_pool_max,
+       &ttm_page_pool_small,
+       &ttm_page_pool_alloc_size,
+       NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+       struct ttm_pool_manager *m =
+               container_of(kobj, struct ttm_pool_manager, kobj);
+       kfree(m);
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
+                             const char *buffer, size_t size)
+{
+       struct ttm_pool_manager *m =
+               container_of(kobj, struct ttm_pool_manager, kobj);
+       int chars;
+       unsigned val;
+       chars = sscanf(buffer, "%u", &val);
+       if (chars == 0)
+               return size;
+
+       /* Convert kb to number of pages */
+       val = val / (PAGE_SIZE >> 10);
+
+       if (attr == &ttm_page_pool_max)
+               m->options.max_size = val;
+       else if (attr == &ttm_page_pool_small)
+               m->options.small = val;
+       else if (attr == &ttm_page_pool_alloc_size) {
+               if (val > NUM_PAGES_TO_ALLOC*8) {
+                       printk(KERN_ERR TTM_PFX
+                              "Setting allocation size to %lu "
+                              "is not allowed. Recommended size is "
+                              "%lu\n",
+                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+                       return size;
+               } else if (val > NUM_PAGES_TO_ALLOC) {
+                       printk(KERN_WARNING TTM_PFX
+                              "Setting allocation size to "
+                              "larger than %lu is not recommended.\n",
+                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+               }
+               m->options.alloc_size = val;
+       }
+
+       return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
+                            char *buffer)
+{
+       struct ttm_pool_manager *m =
+               container_of(kobj, struct ttm_pool_manager, kobj);
+       unsigned val = 0;
+
+       if (attr == &ttm_page_pool_max)
+               val = m->options.max_size;
+       else if (attr == &ttm_page_pool_small)
+               val = m->options.small;
+       else if (attr == &ttm_page_pool_alloc_size)
+               val = m->options.alloc_size;
+
+       val = val * (PAGE_SIZE >> 10);
+
+       return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+       .show = &ttm_pool_show,
+       .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+       .release = &ttm_pool_kobj_release,
+       .sysfs_ops = &ttm_pool_sysfs_ops,
+       .default_attrs = ttm_pool_attrs,
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+       int i;
+
+       for (i = 0; i < addrinarray; i++)
+               unmap_page_from_agp(pages[i]);
+#endif
+       return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+       int i;
+
+       for (i = 0; i < addrinarray; i++)
+               map_page_into_agp(pages[i]);
+#endif
+       return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+       int i;
+
+       for (i = 0; i < addrinarray; i++)
+               map_page_into_agp(pages[i]);
+#endif
+       return 0;
+}
+#endif /* for !CONFIG_X86 */
+
+static int ttm_set_pages_caching(struct dma_pool *pool,
+                                struct page **pages, unsigned cpages)
+{
+       int r = 0;
+       /* Set page caching */
+       if (pool->type & IS_UC) {
+               r = set_pages_array_uc(pages, cpages);
+               if (r)
+                       pr_err(TTM_PFX
+                              "%s: Failed to set %d pages to uc!\n",
+                              pool->dev_name, cpages);
+       }
+       if (pool->type & IS_WC) {
+               r = set_pages_array_wc(pages, cpages);
+               if (r)
+                       pr_err(TTM_PFX
+                              "%s: Failed to set %d pages to wc!\n",
+                              pool->dev_name, cpages);
+       }
+       return r;
+}
+
+static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
+{
+       dma_addr_t dma = d_page->dma;
+       dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
+
+       kfree(d_page);
+       d_page = NULL;
+}
+static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
+{
+       struct dma_page *d_page;
+
+       d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
+       if (!d_page)
+               return NULL;
+
+       d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
+                                          &d_page->dma,
+                                          pool->gfp_flags);
+       if (d_page->vaddr)
+               d_page->p = virt_to_page(d_page->vaddr);
+       else {
+               kfree(d_page);
+               d_page = NULL;
+       }
+       return d_page;
+}
+static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
+{
+       enum pool_type type = IS_UNDEFINED;
+
+       if (flags & TTM_PAGE_FLAG_DMA32)
+               type |= IS_DMA32;
+       if (cstate == tt_cached)
+               type |= IS_CACHED;
+       else if (cstate == tt_uncached)
+               type |= IS_UC;
+       else
+               type |= IS_WC;
+
+       return type;
+}
+
+static void ttm_pool_update_free_locked(struct dma_pool *pool,
+                                       unsigned freed_pages)
+{
+       pool->npages_free -= freed_pages;
+       pool->nfrees += freed_pages;
+
+}
+
+/* Set the memory back to WB and free the pages. */
+static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
+                             struct page *pages[], unsigned npages)
+{
+       struct dma_page *d_page, *tmp;
+
+       /* Don't set WB on WB page pool. */
+       if (npages && !(pool->type & IS_CACHED) &&
+           set_pages_array_wb(pages, npages))
+               pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
+                       pool->dev_name, npages);
+
+       list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+               list_del(&d_page->page_list);
+               __ttm_dma_free_page(pool, d_page);
+       }
+}
+
+static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+{
+       /* Don't set WB on WB page pool. */
+       if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
+               pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
+                       pool->dev_name, 1);
+
+       list_del(&d_page->page_list);
+       __ttm_dma_free_page(pool, d_page);
+}
+
+/*
+ * Free pages from the pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: the pool to free pages from
+ * @nr_free: the number of pages to free; FREE_ALL_PAGES frees everything
+ **/
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+{
+       unsigned long irq_flags;
+       struct dma_page *dma_p, *tmp;
+       struct page **pages_to_free;
+       struct list_head d_pages;
+       unsigned freed_pages = 0,
+                npages_to_free = nr_free;
+
+       if (NUM_PAGES_TO_ALLOC < nr_free)
+               npages_to_free = NUM_PAGES_TO_ALLOC;
+#if 0
+       if (nr_free > 1) {
+               pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
+                       pool->dev_name, pool->name, current->pid,
+                       npages_to_free, nr_free);
+       }
+#endif
+       pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+                       GFP_KERNEL);
+
+       if (!pages_to_free) {
+               pr_err(TTM_PFX
+                      "%s: Failed to allocate memory for pool free operation.\n",
+                       pool->dev_name);
+               return 0;
+       }
+       INIT_LIST_HEAD(&d_pages);
+restart:
+       spin_lock_irqsave(&pool->lock, irq_flags);
+
+       /* We pick the oldest ones off the list */
+       list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
+                                        page_list) {
+               if (freed_pages >= npages_to_free)
+                       break;
+
+               /* Move the dma_page from one list to another. */
+               list_move(&dma_p->page_list, &d_pages);
+
+               pages_to_free[freed_pages++] = dma_p->p;
+               /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+               if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+
+                       ttm_pool_update_free_locked(pool, freed_pages);
+                       /**
+                        * Because changing page caching is costly
+                        * we unlock the pool to prevent stalling.
+                        */
+                       spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+                       ttm_dma_pages_put(pool, &d_pages, pages_to_free,
+                                         freed_pages);
+
+                       INIT_LIST_HEAD(&d_pages);
+
+                       if (likely(nr_free != FREE_ALL_PAGES))
+                               nr_free -= freed_pages;
+
+                       if (NUM_PAGES_TO_ALLOC >= nr_free)
+                               npages_to_free = nr_free;
+                       else
+                               npages_to_free = NUM_PAGES_TO_ALLOC;
+
+                       freed_pages = 0;
+
+                       /* free all so restart the processing */
+                       if (nr_free)
+                               goto restart;
+
+                       /* We are not allowed to fall through or break because
+                        * the code that follows runs with the spinlock held,
+                        * while here we have dropped it.
+                        */
+                       goto out;
+
+               }
+       }
+
+       /* remove range of pages from the pool */
+       if (freed_pages) {
+               ttm_pool_update_free_locked(pool, freed_pages);
+               nr_free -= freed_pages;
+       }
+
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+       if (freed_pages)
+               ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
+out:
+       kfree(pages_to_free);
+       return nr_free;
+}
+
+static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+{
+       struct device_pools *p;
+       struct dma_pool *pool;
+
+       if (!dev)
+               return;
+
+       mutex_lock(&_manager->lock);
+       list_for_each_entry_reverse(p, &_manager->pools, pools) {
+               if (p->dev != dev)
+                       continue;
+               pool = p->pool;
+               if (pool->type != type)
+                       continue;
+
+               list_del(&p->pools);
+               kfree(p);
+               _manager->npools--;
+               break;
+       }
+       list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
+               if (pool->type != type)
+                       continue;
+               /* Takes a spinlock.. */
+               ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+               WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+               /* This code path is called after _all_ references to the
+                * struct device have been dropped - so nobody should be
+                * touching it. In case somebody is trying to _add_, we are
+                * guarded by the mutex. */
+               list_del(&pool->pools);
+               kfree(pool);
+               break;
+       }
+       mutex_unlock(&_manager->lock);
+}
+
+/*
+ * This destructor is run when the 'struct device' is freed,
+ * although the pool might have already been freed earlier.
+ */
+static void ttm_dma_pool_release(struct device *dev, void *res)
+{
+       struct dma_pool *pool = *(struct dma_pool **)res;
+
+       if (pool)
+               ttm_dma_free_pool(dev, pool->type);
+}
+
+static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
+{
+       return *(struct dma_pool **)res == match_data;
+}
+
+static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
+                                         enum pool_type type)
+{
+       char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
+       enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
+       struct device_pools *sec_pool = NULL;
+       struct dma_pool *pool = NULL, **ptr;
+       unsigned i;
+       int ret = -ENODEV;
+       char *p;
+
+       if (!dev)
+               return NULL;
+
+       ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return NULL;
+
+       ret = -ENOMEM;
+
+       pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
+                           dev_to_node(dev));
+       if (!pool)
+               goto err_mem;
+
+       sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
+                               dev_to_node(dev));
+       if (!sec_pool)
+               goto err_mem;
+
+       INIT_LIST_HEAD(&sec_pool->pools);
+       sec_pool->dev = dev;
+       sec_pool->pool =  pool;
+
+       INIT_LIST_HEAD(&pool->free_list);
+       INIT_LIST_HEAD(&pool->inuse_list);
+       INIT_LIST_HEAD(&pool->pools);
+       spin_lock_init(&pool->lock);
+       pool->dev = dev;
+       pool->npages_free = pool->npages_in_use = 0;
+       pool->nfrees = 0;
+       pool->gfp_flags = flags;
+       pool->size = PAGE_SIZE;
+       pool->type = type;
+       pool->nrefills = 0;
+       p = pool->name;
+       for (i = 0; i < 5; i++) {
+               if (type & t[i]) {
+                       p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+                                     "%s", n[i]);
+               }
+       }
+       *p = 0;
+       /* We copy the name for pr_ calls because by the time dma_pool_destroy
+        * is called the kobj->name has already been deallocated. */
+       snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
+                dev_driver_string(dev), dev_name(dev));
+       mutex_lock(&_manager->lock);
+       /* You can get the dma_pool from either the global: */
+       list_add(&sec_pool->pools, &_manager->pools);
+       _manager->npools++;
+       /* or from 'struct device': */
+       list_add(&pool->pools, &dev->dma_pools);
+       mutex_unlock(&_manager->lock);
+
+       *ptr = pool;
+       devres_add(dev, ptr);
+
+       return pool;
+err_mem:
+       devres_free(ptr);
+       kfree(sec_pool);
+       kfree(pool);
+       return ERR_PTR(ret);
+}
+
+static struct dma_pool *ttm_dma_find_pool(struct device *dev,
+                                         enum pool_type type)
+{
+       struct dma_pool *pool, *tmp, *found = NULL;
+
+       if (type == IS_UNDEFINED)
+               return found;
+
+       /* NB: We iterate on the 'struct dev' which has no spinlock, but
+        * it does have a kref which we have taken. The kref is taken during
+        * graphics driver loading - drm_pci_init calls either
+        * pci_dev_get or pci_register_driver, both of which end up taking a
+        * kref on 'struct device'.
+        *
+        * On teardown, the graphics drivers quiesce the TTM (put_pages)
+        * and call the dev_res destructors, i.e. ttm_dma_pool_release. The nice
+        * thing is that at that point in time there are no pages associated
+        * with the driver, so this function will not be called.
+        */
+       list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
+               if (pool->type != type)
+                       continue;
+               found = pool;
+               break;
+       }
+       return found;
+}
+
+/*
+ * Free the pages that failed to change their caching state. If there
+ * are pages that have already changed their caching state, put them back
+ * in the pool.
+ */
+static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
+                                                struct list_head *d_pages,
+                                                struct page **failed_pages,
+                                                unsigned cpages)
+{
+       struct dma_page *d_page, *tmp;
+       struct page *p;
+       unsigned i = 0;
+
+       p = failed_pages[0];
+       if (!p)
+               return;
+       /* Find the failed page. */
+       list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+               if (d_page->p != p)
+                       continue;
+               /* .. and then progress over the full list. */
+               list_del(&d_page->page_list);
+               __ttm_dma_free_page(pool, d_page);
+               if (++i < cpages)
+                       p = failed_pages[i];
+               else
+                       break;
+       }
+
+}
+
+/*
+ * Allocate 'count' pages, set their caching state and put them on
+ * the 'd_pages' list.
+ * We return zero for success, and negative numbers as errors.
+ */
+static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
+                                       struct list_head *d_pages,
+                                       unsigned count)
+{
+       struct page **caching_array;
+       struct dma_page *dma_p;
+       struct page *p;
+       int r = 0;
+       unsigned i, cpages;
+       unsigned max_cpages = min(count,
+                       (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+       /* allocate array for page caching change */
+       caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+       if (!caching_array) {
+               pr_err(TTM_PFX
+                      "%s: Unable to allocate table for new pages.",
+                       pool->dev_name);
+               return -ENOMEM;
+       }
+
+       if (count > 1) {
+               pr_debug("%s: (%s:%d) Getting %d pages\n",
+                       pool->dev_name, pool->name, current->pid,
+                       count);
+       }
+
+       for (i = 0, cpages = 0; i < count; ++i) {
+               dma_p = __ttm_dma_alloc_page(pool);
+               if (!dma_p) {
+                       pr_err(TTM_PFX "%s: Unable to get page %u.\n",
+                               pool->dev_name, i);
+
+                       /* store already allocated pages in the pool after
+                        * setting the caching state */
+                       if (cpages) {
+                               r = ttm_set_pages_caching(pool, caching_array,
+                                                         cpages);
+                               if (r)
+                                       ttm_dma_handle_caching_state_failure(
+                                               pool, d_pages, caching_array,
+                                               cpages);
+                       }
+                       r = -ENOMEM;
+                       goto out;
+               }
+               p = dma_p->p;
+#ifdef CONFIG_HIGHMEM
+               /* gfp flags of a highmem page should never be dma32, so
+                * we should be fine in that case
+                */
+               if (!PageHighMem(p))
+#endif
+               {
+                       caching_array[cpages++] = p;
+                       if (cpages == max_cpages) {
+                               /* Note: Cannot hold the spinlock */
+                               r = ttm_set_pages_caching(pool, caching_array,
+                                                cpages);
+                               if (r) {
+                                       ttm_dma_handle_caching_state_failure(
+                                               pool, d_pages, caching_array,
+                                               cpages);
+                                       goto out;
+                               }
+                               cpages = 0;
+                       }
+               }
+               list_add(&dma_p->page_list, d_pages);
+       }
+
+       if (cpages) {
+               r = ttm_set_pages_caching(pool, caching_array, cpages);
+               if (r)
+                       ttm_dma_handle_caching_state_failure(pool, d_pages,
+                                       caching_array, cpages);
+       }
+out:
+       kfree(caching_array);
+       return r;
+}
+
+/*
+ * @return count of pages still required to fulfill the request.
+ */
+static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
+                                        unsigned long *irq_flags)
+{
+       unsigned count = _manager->options.small;
+       int r = pool->npages_free;
+
+       if (count > pool->npages_free) {
+               struct list_head d_pages;
+
+               INIT_LIST_HEAD(&d_pages);
+
+               spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+               /* Returns how many more are necessary to fulfill the
+                * request. */
+               r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
+
+               spin_lock_irqsave(&pool->lock, *irq_flags);
+               if (!r) {
+                       /* Add the fresh pages to the end. */
+                       list_splice(&d_pages, &pool->free_list);
+                       ++pool->nrefills;
+                       pool->npages_free += count;
+                       r = count;
+               } else {
+                       struct dma_page *d_page;
+                       unsigned cpages = 0;
+
+                       pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n",
+                               pool->dev_name, pool->name, r);
+
+                       list_for_each_entry(d_page, &d_pages, page_list) {
+                               cpages++;
+                       }
+                       list_splice_tail(&d_pages, &pool->free_list);
+                       pool->npages_free += cpages;
+                       r = cpages;
+               }
+       }
+       return r;
+}
+
+/*
+ * @return zero on success, or a negative error code on failure.
+ * The populate list is actually a stack (not that it matters, as TTM
+ * allocates one page at a time).
+ */
+static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+                                 struct ttm_dma_tt *ttm_dma,
+                                 unsigned index)
+{
+       struct dma_page *d_page;
+       struct ttm_tt *ttm = &ttm_dma->ttm;
+       unsigned long irq_flags;
+       int count, r = -ENOMEM;
+
+       spin_lock_irqsave(&pool->lock, irq_flags);
+       count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
+       if (count) {
+               d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
+               ttm->pages[index] = d_page->p;
+               ttm_dma->dma_address[index] = d_page->dma;
+               list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+               r = 0;
+               pool->npages_in_use += 1;
+               pool->npages_free -= 1;
+       }
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+       return r;
+}
+
+/*
+ * On success the pages list will hold the requested number of correctly
+ * cached pages. On failure, a negative error value (-ENOMEM, etc.) is returned.
+ */
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+       struct ttm_tt *ttm = &ttm_dma->ttm;
+       struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+       struct dma_pool *pool;
+       enum pool_type type;
+       unsigned i;
+       gfp_t gfp_flags;
+       int ret;
+
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+       if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+               gfp_flags = GFP_USER | GFP_DMA32;
+       else
+               gfp_flags = GFP_HIGHUSER;
+       if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+               gfp_flags |= __GFP_ZERO;
+
+       pool = ttm_dma_find_pool(dev, type);
+       if (!pool) {
+               pool = ttm_dma_pool_init(dev, gfp_flags, type);
+               if (IS_ERR_OR_NULL(pool)) {
+                       return -ENOMEM;
+               }
+       }
+
+       INIT_LIST_HEAD(&ttm_dma->pages_list);
+       for (i = 0; i < ttm->num_pages; ++i) {
+               ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+               if (ret != 0) {
+                       ttm_dma_unpopulate(ttm_dma, dev);
+                       return -ENOMEM;
+               }
+
+               ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+                                               false, false);
+               if (unlikely(ret != 0)) {
+                       ttm_dma_unpopulate(ttm_dma, dev);
+                       return -ENOMEM;
+               }
+       }
+
+       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+               ret = ttm_tt_swapin(ttm);
+               if (unlikely(ret != 0)) {
+                       ttm_dma_unpopulate(ttm_dma, dev);
+                       return ret;
+               }
+       }
+
+       ttm->state = tt_unbound;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_populate);
+
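
For coherent-DMA hardware the flow has the same shape, but goes through the ttm_dma_tt wrapper so the bus addresses land in dma_address[]. A minimal, hypothetical sketch follows; the example_ttm_tt wrapper and its dev member are assumptions of the sketch, not part of the patch.

#include <linux/device.h>
#include <linux/kernel.h>               /* container_of */
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

/* Hypothetical per-object wrapper a driver might keep around its ttm_dma_tt. */
struct example_ttm_tt {
        struct ttm_dma_tt dma;          /* embeds the base struct ttm_tt as dma.ttm */
        struct device *dev;             /* the device the DMA pages are allocated for */
};

static int example_dma_tt_populate(struct ttm_tt *ttm)
{
        struct example_ttm_tt *ett =
                container_of(ttm, struct example_ttm_tt, dma.ttm);

        /* Fills ttm->pages[] and dma.dma_address[] from the per-device pool. */
        return ttm_dma_populate(&ett->dma, ett->dev);
}

static void example_dma_tt_unpopulate(struct ttm_tt *ttm)
{
        struct example_ttm_tt *ett =
                container_of(ttm, struct example_ttm_tt, dma.ttm);

        /* Hands the pages, and their accounting, back to the pool. */
        ttm_dma_unpopulate(&ett->dma, ett->dev);
}
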
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_dma_pool_get_num_unused_pages(void)
+{
+       struct device_pools *p;
+       unsigned total = 0;
+
+       mutex_lock(&_manager->lock);
+       list_for_each_entry(p, &_manager->pools, pools)
+               total += p->pool->npages_free;
+       mutex_unlock(&_manager->lock);
+       return total;
+}
+
+/* Put all pages in the pages list into the correct pool to wait for reuse */
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+       struct ttm_tt *ttm = &ttm_dma->ttm;
+       struct dma_pool *pool;
+       struct dma_page *d_page, *next;
+       enum pool_type type;
+       bool is_cached = false;
+       unsigned count = 0, i, npages = 0;
+       unsigned long irq_flags;
+
+       type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+       pool = ttm_dma_find_pool(dev, type);
+       if (!pool) {
+               WARN_ON(!pool);
+               return;
+       }
+       is_cached = (ttm_dma_find_pool(pool->dev,
+                    ttm_to_type(ttm->page_flags, tt_cached)) == pool);
+
+       /* make sure the pages array matches the list and count the pages */
+       list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+               ttm->pages[count] = d_page->p;
+               count++;
+       }
+
+       spin_lock_irqsave(&pool->lock, irq_flags);
+       pool->npages_in_use -= count;
+       if (is_cached) {
+               pool->nfrees += count;
+       } else {
+               pool->npages_free += count;
+               list_splice(&ttm_dma->pages_list, &pool->free_list);
+               npages = count;
+               if (pool->npages_free > _manager->options.max_size) {
+                       npages = pool->npages_free - _manager->options.max_size;
+                       /* free at least NUM_PAGES_TO_ALLOC number of pages
+                        * to reduce calls to set_memory_wb */
+                       if (npages < NUM_PAGES_TO_ALLOC)
+                               npages = NUM_PAGES_TO_ALLOC;
+               }
+       }
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+       if (is_cached) {
+               list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
+                       ttm_mem_global_free_page(ttm->glob->mem_glob,
+                                                d_page->p);
+                       ttm_dma_page_put(pool, d_page);
+               }
+       } else {
+               for (i = 0; i < count; i++) {
+                       ttm_mem_global_free_page(ttm->glob->mem_glob,
+                                                ttm->pages[i]);
+               }
+       }
+
+       INIT_LIST_HEAD(&ttm_dma->pages_list);
+       for (i = 0; i < ttm->num_pages; i++) {
+               ttm->pages[i] = NULL;
+               ttm_dma->dma_address[i] = 0;
+       }
+
+       /* shrink the pool if necessary (only for !is_cached pools) */
+       if (npages)
+               ttm_dma_page_pool_free(pool, npages);
+       ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+
+/**
+ * Callback for mm to request that the pool reduce the number of pages held.
+ */
+static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
+                                 struct shrink_control *sc)
+{
+       static atomic_t start_pool = ATOMIC_INIT(0);
+       unsigned idx = 0;
+       unsigned pool_offset = atomic_add_return(1, &start_pool);
+       unsigned shrink_pages = sc->nr_to_scan;
+       struct device_pools *p;
+
+       if (list_empty(&_manager->pools))
+               return 0;
+
+       mutex_lock(&_manager->lock);
+       pool_offset = pool_offset % _manager->npools;
+       list_for_each_entry(p, &_manager->pools, pools) {
+               unsigned nr_free;
+
+               if (!p->dev)
+                       continue;
+               if (shrink_pages == 0)
+                       break;
+               /* Do it in round-robin fashion. */
+               if (++idx < pool_offset)
+                       continue;
+               nr_free = shrink_pages;
+               shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+               pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+                       p->pool->dev_name, p->pool->name, current->pid, nr_free,
+                       shrink_pages);
+       }
+       mutex_unlock(&_manager->lock);
+       /* return estimated number of unused pages in pool */
+       return ttm_dma_pool_get_num_unused_pages();
+}
+
+static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+       manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+       manager->mm_shrink.seeks = 1;
+       register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+       unregister_shrinker(&manager->mm_shrink);
+}
+
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+       int ret = -ENOMEM;
+
+       WARN_ON(_manager);
+
+       printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n");
+
+       _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+       if (!_manager)
+               goto err_manager;
+
+       mutex_init(&_manager->lock);
+       INIT_LIST_HEAD(&_manager->pools);
+
+       _manager->options.max_size = max_pages;
+       _manager->options.small = SMALL_ALLOCATION;
+       _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+       /* This takes care of auto-freeing the _manager */
+       ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+                                  &glob->kobj, "dma_pool");
+       if (unlikely(ret != 0)) {
+               kobject_put(&_manager->kobj);
+               goto err;
+       }
+       ttm_dma_pool_mm_shrink_init(_manager);
+       return 0;
+err_manager:
+       kfree(_manager);
+       _manager = NULL;
+err:
+       return ret;
+}
+
+void ttm_dma_page_alloc_fini(void)
+{
+       struct device_pools *p, *t;
+
+       printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n");
+       ttm_dma_pool_mm_shrink_fini(_manager);
+
+       list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
+               dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
+                       current->pid);
+               WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
+                       ttm_dma_pool_match, p->pool));
+               ttm_dma_free_pool(p->dev, p->pool->type);
+       }
+       kobject_put(&_manager->kobj);
+       _manager = NULL;
+}
+
+int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+       struct device_pools *p;
+       struct dma_pool *pool = NULL;
+       char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
+                    "name", "virt", "busaddr"};
+
+       if (!_manager) {
+               seq_printf(m, "No pool allocator running.\n");
+               return 0;
+       }
+       seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
+                  h[0], h[1], h[2], h[3], h[4], h[5]);
+       mutex_lock(&_manager->lock);
+       list_for_each_entry(p, &_manager->pools, pools) {
+               struct device *dev = p->dev;
+               if (!dev)
+                       continue;
+               pool = p->pool;
+               seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
+                               pool->name, pool->nrefills,
+                               pool->nfrees, pool->npages_in_use,
+                               pool->npages_free,
+                               pool->dev_name);
+       }
+       mutex_unlock(&_manager->lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
index f9cc548..2f75d20 100644 (file)
 #include "ttm/ttm_placement.h"
 #include "ttm/ttm_page_alloc.h"
 
-static int ttm_tt_swapin(struct ttm_tt *ttm);
-
 /**
  * Allocates storage for pointers to the pages that back the ttm.
  */
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-       ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
-       ttm->dma_address = drm_calloc_large(ttm->num_pages,
-                                           sizeof(*ttm->dma_address));
-}
-
-static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
-{
-       drm_free_large(ttm->pages);
-       ttm->pages = NULL;
-       drm_free_large(ttm->dma_address);
-       ttm->dma_address = NULL;
-}
-
-static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
-{
-       int write;
-       int dirty;
-       struct page *page;
-       int i;
-       struct ttm_backend *be = ttm->be;
-
-       BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
-       write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
-       dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
-
-       if (be)
-               be->func->clear(be);
-
-       for (i = 0; i < ttm->num_pages; ++i) {
-               page = ttm->pages[i];
-               if (page == NULL)
-                       continue;
-
-               if (page == ttm->dummy_read_page) {
-                       BUG_ON(write);
-                       continue;
-               }
-
-               if (write && dirty && !PageReserved(page))
-                       set_page_dirty_lock(page);
-
-               ttm->pages[i] = NULL;
-               ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
-               put_page(page);
-       }
-       ttm->state = tt_unpopulated;
-       ttm->first_himem_page = ttm->num_pages;
-       ttm->last_lomem_page = -1;
+       ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
 }
 
-static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 {
-       struct page *p;
-       struct list_head h;
-       struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-       int ret;
-
-       while (NULL == (p = ttm->pages[index])) {
-
-               INIT_LIST_HEAD(&h);
-
-               ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
-                                   &ttm->dma_address[index]);
-
-               if (ret != 0)
-                       return NULL;
-
-               p = list_first_entry(&h, struct page, lru);
-
-               ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
-               if (unlikely(ret != 0))
-                       goto out_err;
-
-               if (PageHighMem(p))
-                       ttm->pages[--ttm->first_himem_page] = p;
-               else
-                       ttm->pages[++ttm->last_lomem_page] = p;
-       }
-       return p;
-out_err:
-       put_page(p);
-       return NULL;
-}
-
-struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
-       int ret;
-
-       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-               ret = ttm_tt_swapin(ttm);
-               if (unlikely(ret != 0))
-                       return NULL;
-       }
-       return __ttm_tt_get_page(ttm, index);
-}
-
-int ttm_tt_populate(struct ttm_tt *ttm)
-{
-       struct page *page;
-       unsigned long i;
-       struct ttm_backend *be;
-       int ret;
-
-       if (ttm->state != tt_unpopulated)
-               return 0;
-
-       if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-               ret = ttm_tt_swapin(ttm);
-               if (unlikely(ret != 0))
-                       return ret;
-       }
-
-       be = ttm->be;
-
-       for (i = 0; i < ttm->num_pages; ++i) {
-               page = __ttm_tt_get_page(ttm, i);
-               if (!page)
-                       return -ENOMEM;
-       }
-
-       be->func->populate(be, ttm->num_pages, ttm->pages,
-                          ttm->dummy_read_page, ttm->dma_address);
-       ttm->state = tt_unbound;
-       return 0;
+       ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
+       ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
+                                           sizeof(*ttm->dma_address));
 }
-EXPORT_SYMBOL(ttm_tt_populate);
 
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
@@ -278,153 +159,100 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
 }
 EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
-static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
-{
-       int i;
-       unsigned count = 0;
-       struct list_head h;
-       struct page *cur_page;
-       struct ttm_backend *be = ttm->be;
-
-       INIT_LIST_HEAD(&h);
-
-       if (be)
-               be->func->clear(be);
-       for (i = 0; i < ttm->num_pages; ++i) {
-
-               cur_page = ttm->pages[i];
-               ttm->pages[i] = NULL;
-               if (cur_page) {
-                       if (page_count(cur_page) != 1)
-                               printk(KERN_ERR TTM_PFX
-                                      "Erroneous page count. "
-                                      "Leaking pages.\n");
-                       ttm_mem_global_free_page(ttm->glob->mem_glob,
-                                                cur_page);
-                       list_add(&cur_page->lru, &h);
-                       count++;
-               }
-       }
-       ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
-                     ttm->dma_address);
-       ttm->state = tt_unpopulated;
-       ttm->first_himem_page = ttm->num_pages;
-       ttm->last_lomem_page = -1;
-}
-
 void ttm_tt_destroy(struct ttm_tt *ttm)
 {
-       struct ttm_backend *be;
-
        if (unlikely(ttm == NULL))
                return;
 
-       be = ttm->be;
-       if (likely(be != NULL)) {
-               be->func->destroy(be);
-               ttm->be = NULL;
+       if (ttm->state == tt_bound) {
+               ttm_tt_unbind(ttm);
        }
 
        if (likely(ttm->pages != NULL)) {
-               if (ttm->page_flags & TTM_PAGE_FLAG_USER)
-                       ttm_tt_free_user_pages(ttm);
-               else
-                       ttm_tt_free_alloced_pages(ttm);
-
-               ttm_tt_free_page_directory(ttm);
+               ttm->bdev->driver->ttm_tt_unpopulate(ttm);
        }
 
        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);
 
-       kfree(ttm);
+       ttm->swap_storage = NULL;
+       ttm->func->destroy(ttm);
 }
 
-int ttm_tt_set_user(struct ttm_tt *ttm,
-                   struct task_struct *tsk,
-                   unsigned long start, unsigned long num_pages)
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+               unsigned long size, uint32_t page_flags,
+               struct page *dummy_read_page)
 {
-       struct mm_struct *mm = tsk->mm;
-       int ret;
-       int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
-       struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-
-       BUG_ON(num_pages != ttm->num_pages);
-       BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
-
-       /**
-        * Account user pages as lowmem pages for now.
-        */
-
-       ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
-                                  false, false);
-       if (unlikely(ret != 0))
-               return ret;
-
-       down_read(&mm->mmap_sem);
-       ret = get_user_pages(tsk, mm, start, num_pages,
-                            write, 0, ttm->pages, NULL);
-       up_read(&mm->mmap_sem);
+       ttm->bdev = bdev;
+       ttm->glob = bdev->glob;
+       ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       ttm->caching_state = tt_cached;
+       ttm->page_flags = page_flags;
+       ttm->dummy_read_page = dummy_read_page;
+       ttm->state = tt_unpopulated;
+       ttm->swap_storage = NULL;
 
-       if (ret != num_pages && write) {
-               ttm_tt_free_user_pages(ttm);
-               ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
+       ttm_tt_alloc_page_directory(ttm);
+       if (!ttm->pages) {
+               ttm_tt_destroy(ttm);
+               printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
                return -ENOMEM;
        }
-
-       ttm->tsk = tsk;
-       ttm->start = start;
-       ttm->state = tt_unbound;
-
        return 0;
 }
+EXPORT_SYMBOL(ttm_tt_init);
 
-struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
-                            uint32_t page_flags, struct page *dummy_read_page)
+void ttm_tt_fini(struct ttm_tt *ttm)
 {
-       struct ttm_bo_driver *bo_driver = bdev->driver;
-       struct ttm_tt *ttm;
-
-       if (!bo_driver)
-               return NULL;
+       drm_free_large(ttm->pages);
+       ttm->pages = NULL;
+}
+EXPORT_SYMBOL(ttm_tt_fini);
 
-       ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
-       if (!ttm)
-               return NULL;
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+               unsigned long size, uint32_t page_flags,
+               struct page *dummy_read_page)
+{
+       struct ttm_tt *ttm = &ttm_dma->ttm;
 
+       ttm->bdev = bdev;
        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       ttm->first_himem_page = ttm->num_pages;
-       ttm->last_lomem_page = -1;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
-
        ttm->dummy_read_page = dummy_read_page;
+       ttm->state = tt_unpopulated;
+       ttm->swap_storage = NULL;
 
-       ttm_tt_alloc_page_directory(ttm);
-       if (!ttm->pages) {
+       INIT_LIST_HEAD(&ttm_dma->pages_list);
+       ttm_dma_tt_alloc_page_directory(ttm_dma);
+       if (!ttm->pages || !ttm_dma->dma_address) {
                ttm_tt_destroy(ttm);
                printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
-               return NULL;
-       }
-       ttm->be = bo_driver->create_ttm_backend_entry(bdev);
-       if (!ttm->be) {
-               ttm_tt_destroy(ttm);
-               printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
-               return NULL;
+               return -ENOMEM;
        }
-       ttm->state = tt_unpopulated;
-       return ttm;
+       return 0;
 }
+EXPORT_SYMBOL(ttm_dma_tt_init);
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+       struct ttm_tt *ttm = &ttm_dma->ttm;
+
+       drm_free_large(ttm->pages);
+       ttm->pages = NULL;
+       drm_free_large(ttm_dma->dma_address);
+       ttm_dma->dma_address = NULL;
+}
+EXPORT_SYMBOL(ttm_dma_tt_fini);
 
 void ttm_tt_unbind(struct ttm_tt *ttm)
 {
        int ret;
-       struct ttm_backend *be = ttm->be;
 
        if (ttm->state == tt_bound) {
-               ret = be->func->unbind(be);
+               ret = ttm->func->unbind(ttm);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
@@ -433,7 +261,6 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
        int ret = 0;
-       struct ttm_backend *be;
 
        if (!ttm)
                return -EINVAL;
@@ -441,25 +268,21 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
        if (ttm->state == tt_bound)
                return 0;
 
-       be = ttm->be;
-
-       ret = ttm_tt_populate(ttm);
+       ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
                return ret;
 
-       ret = be->func->bind(be, bo_mem);
+       ret = ttm->func->bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                return ret;
 
        ttm->state = tt_bound;
 
-       if (ttm->page_flags & TTM_PAGE_FLAG_USER)
-               ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
        return 0;
 }
 EXPORT_SYMBOL(ttm_tt_bind);
 
-static int ttm_tt_swapin(struct ttm_tt *ttm)
+int ttm_tt_swapin(struct ttm_tt *ttm)
 {
        struct address_space *swap_space;
        struct file *swap_storage;
@@ -470,16 +293,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
        int i;
        int ret = -ENOMEM;
 
-       if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
-               ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
-                                     ttm->num_pages);
-               if (unlikely(ret != 0))
-                       return ret;
-
-               ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
-               return 0;
-       }
-
        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);
 
@@ -491,7 +304,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
                        ret = PTR_ERR(from_page);
                        goto out_err;
                }
-               to_page = __ttm_tt_get_page(ttm, i);
+               to_page = ttm->pages[i];
                if (unlikely(to_page == NULL))
                        goto out_err;
 
@@ -512,7 +325,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 
        return 0;
 out_err:
-       ttm_tt_free_alloced_pages(ttm);
        return ret;
 }
 
@@ -530,18 +342,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);
 
-       /*
-        * For user buffers, just unpin the pages, as there should be
-        * vma references.
-        */
-
-       if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
-               ttm_tt_free_user_pages(ttm);
-               ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
-               ttm->swap_storage = NULL;
-               return 0;
-       }
-
        if (!persistent_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
@@ -576,7 +376,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
                page_cache_release(to_page);
        }
 
-       ttm_tt_free_alloced_pages(ttm);
+       ttm->bdev->driver->ttm_tt_unpopulate(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
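To make the reworked ttm_tt interface above easier to follow, here is a minimal driver-side sketch under the new scheme (all foo_ names are illustrative; the vmwgfx conversion further down in this diff has the same shape): the driver embeds struct ttm_tt, points ->func at its bind/unbind/destroy table before calling ttm_tt_init(), and routes page allocation through the new ttm_tt_populate/ttm_tt_unpopulate hooks in struct ttm_bo_driver.

#include <linux/slab.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

struct foo_ttm_tt {
	struct ttm_tt ttm;
	/* driver-private binding state (GART table, GMR id, ...) goes here */
};

static int foo_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	/* program the aperture from ttm->pages / ttm->num_pages at bo_mem->start */
	return 0;
}

static int foo_ttm_unbind(struct ttm_tt *ttm)
{
	/* tear the mapping down again */
	return 0;
}

static void foo_ttm_destroy(struct ttm_tt *ttm)
{
	struct foo_ttm_tt *foo = container_of(ttm, struct foo_ttm_tt, ttm);

	ttm_tt_fini(ttm);
	kfree(foo);
}

static struct ttm_backend_func foo_ttm_func = {
	.bind = foo_ttm_bind,
	.unbind = foo_ttm_unbind,
	.destroy = foo_ttm_destroy,
};

static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct foo_ttm_tt *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	foo->ttm.func = &foo_ttm_func;	/* must be set before ttm_tt_init() */
	if (ttm_tt_init(&foo->ttm, bdev, size, page_flags, dummy_read_page))
		return NULL;	/* ttm_tt_init() already invoked ->destroy here */
	return &foo->ttm;
}

/* and in the driver's struct ttm_bo_driver:
 *	.ttm_tt_create     = foo_ttm_tt_create,
 *	.ttm_tt_populate   = ttm_pool_populate,
 *	.ttm_tt_unpopulate = ttm_pool_unpopulate,
 */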
index a83e86d..02661f3 100644 (file)
 
 #include "drm_pciids.h"
 
+static int via_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+       struct via_file_private *file_priv;
+
+       DRM_DEBUG_DRIVER("\n");
+       file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+       if (!file_priv)
+               return -ENOMEM;
+
+       file->driver_priv = file_priv;
+
+       INIT_LIST_HEAD(&file_priv->obj_list);
+
+       return 0;
+}
+
+void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+       struct via_file_private *file_priv = file->driver_priv;
+
+       kfree(file_priv);
+}
+
 static struct pci_device_id pciidlist[] = {
        viadrv_PCI_IDS
 };
 
+static const struct file_operations via_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
            DRIVER_IRQ_SHARED,
        .load = via_driver_load,
        .unload = via_driver_unload,
+       .open = via_driver_open,
+       .postclose = via_driver_postclose,
        .context_dtor = via_final_context,
        .get_vblank_counter = via_get_vblank_counter,
        .enable_vblank = via_enable_vblank,
@@ -54,17 +90,7 @@ static struct drm_driver driver = {
        .reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
        .lastclose = via_lastclose,
        .ioctls = via_ioctls,
-       .fops = {
-               .owner = THIS_MODULE,
-               .open = drm_open,
-               .release = drm_release,
-               .unlocked_ioctl = drm_ioctl,
-               .mmap = drm_mmap,
-               .poll = drm_poll,
-               .fasync = drm_fasync,
-               .llseek = noop_llseek,
-               },
-
+       .fops = &via_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index 9cf87d9..88edacc 100644 (file)
@@ -24,7 +24,7 @@
 #ifndef _VIA_DRV_H_
 #define _VIA_DRV_H_
 
-#include "drm_sman.h"
+#include "drm_mm.h"
 #define DRIVER_AUTHOR  "Various"
 
 #define DRIVER_NAME            "via"
@@ -88,9 +88,12 @@ typedef struct drm_via_private {
        uint32_t irq_pending_mask;
        int *irq_map;
        unsigned int idle_fault;
-       struct drm_sman sman;
        int vram_initialized;
+       struct drm_mm vram_mm;
        int agp_initialized;
+       struct drm_mm agp_mm;
+       /** Mapping of userspace keys to mm objects */
+       struct idr object_idr;
        unsigned long vram_offset;
        unsigned long agp_offset;
        drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
index 6cca9a7..a2ab343 100644 (file)
@@ -104,15 +104,10 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
 
        dev_priv->chipset = chipset;
 
-       ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
-       if (ret) {
-               kfree(dev_priv);
-               return ret;
-       }
+       idr_init(&dev_priv->object_idr);
 
        ret = drm_vblank_init(dev, 1);
        if (ret) {
-               drm_sman_takedown(&dev_priv->sman);
                kfree(dev_priv);
                return ret;
        }
@@ -124,7 +119,8 @@ int via_driver_unload(struct drm_device *dev)
 {
        drm_via_private_t *dev_priv = dev->dev_private;
 
-       drm_sman_takedown(&dev_priv->sman);
+       idr_remove_all(&dev_priv->object_idr);
+       idr_destroy(&dev_priv->object_idr);
 
        kfree(dev_priv);
 
index 6cc2dad..a3574d0 100644 (file)
 #include "drmP.h"
 #include "via_drm.h"
 #include "via_drv.h"
-#include "drm_sman.h"
 
 #define VIA_MM_ALIGN_SHIFT 4
 #define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
 
+struct via_memblock {
+       struct drm_mm_node mm_node;
+       struct list_head owner_list;
+};
+
 int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        drm_via_agp_t *agp = data;
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-       int ret;
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
-                                agp->size >> VIA_MM_ALIGN_SHIFT);
-
-       if (ret) {
-               DRM_ERROR("AGP memory manager initialisation error\n");
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
 
        dev_priv->agp_initialized = 1;
        dev_priv->agp_offset = agp->offset;
@@ -61,17 +57,9 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        drm_via_fb_t *fb = data;
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-       int ret;
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
-                                fb->size >> VIA_MM_ALIGN_SHIFT);
-
-       if (ret) {
-               DRM_ERROR("VRAM memory manager initialisation error\n");
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
 
        dev_priv->vram_initialized = 1;
        dev_priv->vram_offset = fb->offset;
@@ -108,19 +96,25 @@ void via_lastclose(struct drm_device *dev)
                return;
 
        mutex_lock(&dev->struct_mutex);
-       drm_sman_cleanup(&dev_priv->sman);
-       dev_priv->vram_initialized = 0;
-       dev_priv->agp_initialized = 0;
+       if (dev_priv->vram_initialized) {
+               drm_mm_takedown(&dev_priv->vram_mm);
+               dev_priv->vram_initialized = 0;
+       }
+       if (dev_priv->agp_initialized) {
+               drm_mm_takedown(&dev_priv->agp_mm);
+               dev_priv->agp_initialized = 0;
+       }
        mutex_unlock(&dev->struct_mutex);
 }
 
 int via_mem_alloc(struct drm_device *dev, void *data,
-                 struct drm_file *file_priv)
+                 struct drm_file *file)
 {
        drm_via_mem_t *mem = data;
-       int retval = 0;
-       struct drm_memblock_item *item;
+       int retval = 0, user_key;
+       struct via_memblock *item;
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+       struct via_file_private *file_priv = file->driver_priv;
        unsigned long tmpSize;
 
        if (mem->type > VIA_MEM_AGP) {
@@ -136,24 +130,57 @@ int via_mem_alloc(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
+       item = kzalloc(sizeof(*item), GFP_KERNEL);
+       if (!item) {
+               retval = -ENOMEM;
+               goto fail_alloc;
+       }
+
        tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
-       item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
-                             (unsigned long)file_priv);
-       mutex_unlock(&dev->struct_mutex);
-       if (item) {
-               mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
-                             dev_priv->vram_offset : dev_priv->agp_offset) +
-                   (item->mm->
-                    offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
-               mem->index = item->user_hash.key;
-       } else {
-               mem->offset = 0;
-               mem->size = 0;
-               mem->index = 0;
-               DRM_DEBUG("Video memory allocation failed\n");
+       if (mem->type == VIA_MEM_AGP)
+               retval = drm_mm_insert_node(&dev_priv->agp_mm,
+                                           &item->mm_node,
+                                           tmpSize, 0);
+       else
+               retval = drm_mm_insert_node(&dev_priv->vram_mm,
+                                           &item->mm_node,
+                                           tmpSize, 0);
+       if (retval)
+               goto fail_alloc;
+
+again:
+       if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
                retval = -ENOMEM;
+               goto fail_idr;
        }
 
+       retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+       if (retval == -EAGAIN)
+               goto again;
+       if (retval)
+               goto fail_idr;
+
+       list_add(&item->owner_list, &file_priv->obj_list);
+       mutex_unlock(&dev->struct_mutex);
+
+       mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
+                     dev_priv->vram_offset : dev_priv->agp_offset) +
+           ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
+       mem->index = user_key;
+
+       return 0;
+
+fail_idr:
+       drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+       kfree(item);
+       mutex_unlock(&dev->struct_mutex);
+
+       mem->offset = 0;
+       mem->size = 0;
+       mem->index = 0;
+       DRM_DEBUG("Video memory allocation failed\n");
+
        return retval;
 }
 
@@ -161,24 +188,35 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        drm_via_private_t *dev_priv = dev->dev_private;
        drm_via_mem_t *mem = data;
-       int ret;
+       struct via_memblock *obj;
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_sman_free_key(&dev_priv->sman, mem->index);
+       obj = idr_find(&dev_priv->object_idr, mem->index);
+       if (obj == NULL) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       idr_remove(&dev_priv->object_idr, mem->index);
+       list_del(&obj->owner_list);
+       drm_mm_remove_node(&obj->mm_node);
+       kfree(obj);
        mutex_unlock(&dev->struct_mutex);
+
        DRM_DEBUG("free = 0x%lx\n", mem->index);
 
-       return ret;
+       return 0;
 }
 
 
 void via_reclaim_buffers_locked(struct drm_device *dev,
-                               struct drm_file *file_priv)
+                               struct drm_file *file)
 {
-       drm_via_private_t *dev_priv = dev->dev_private;
+       struct via_file_private *file_priv = file->driver_priv;
+       struct via_memblock *entry, *next;
 
        mutex_lock(&dev->struct_mutex);
-       if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+       if (list_empty(&file_priv->obj_list)) {
                mutex_unlock(&dev->struct_mutex);
                return;
        }
@@ -186,7 +224,12 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
        if (dev->driver->dma_quiescent)
                dev->driver->dma_quiescent(dev);
 
-       drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+       list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+                                owner_list) {
+               list_del(&entry->owner_list);
+               drm_mm_remove_node(&entry->mm_node);
+               kfree(entry);
+       }
        mutex_unlock(&dev->struct_mutex);
        return;
 }
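via_mem_alloc() above hands out userspace handles with the two-step idr idiom of this kernel generation: idr_pre_get() preloads memory, and idr_get_new_above() may still return -EAGAIN if the preloaded node was consumed, so the allocation loops. Pulled out on its own, the pattern is roughly the following sketch (example_idr_get_handle is an illustrative name, not part of the patch):

#include <linux/gfp.h>
#include <linux/idr.h>

/*
 * Illustrative helper: return a new handle >= 1 mapping to @ptr in @idr,
 * or a negative errno.  The caller holds the lock that also serializes
 * idr_find()/idr_remove(), as via_mem_alloc() does with struct_mutex.
 */
static int example_idr_get_handle(struct idr *idr, void *ptr)
{
	int ret, handle;

again:
	if (idr_pre_get(idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	ret = idr_get_new_above(idr, ptr, 1, &handle);
	if (ret == -EAGAIN)
		goto again;	/* preallocated node was consumed; retry */
	if (ret)
		return ret;

	return handle;
}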
index 5a72ed9..1e2c0fb 100644 (file)
@@ -28,6 +28,7 @@
 #include "vmwgfx_drv.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
 
 static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED;
@@ -139,85 +140,63 @@ struct ttm_placement vmw_srf_placement = {
        .busy_placement = gmr_vram_placement_flags
 };
 
-struct vmw_ttm_backend {
-       struct ttm_backend backend;
-       struct page **pages;
-       unsigned long num_pages;
+struct vmw_ttm_tt {
+       struct ttm_tt ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
 };
 
-static int vmw_ttm_populate(struct ttm_backend *backend,
-                           unsigned long num_pages, struct page **pages,
-                           struct page *dummy_read_page,
-                           dma_addr_t *dma_addrs)
+static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
-       struct vmw_ttm_backend *vmw_be =
-           container_of(backend, struct vmw_ttm_backend, backend);
-
-       vmw_be->pages = pages;
-       vmw_be->num_pages = num_pages;
-
-       return 0;
-}
-
-static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
-       struct vmw_ttm_backend *vmw_be =
-           container_of(backend, struct vmw_ttm_backend, backend);
+       struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
        vmw_be->gmr_id = bo_mem->start;
 
-       return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
-                           vmw_be->num_pages, vmw_be->gmr_id);
+       return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
+                           ttm->num_pages, vmw_be->gmr_id);
 }
 
-static int vmw_ttm_unbind(struct ttm_backend *backend)
+static int vmw_ttm_unbind(struct ttm_tt *ttm)
 {
-       struct vmw_ttm_backend *vmw_be =
-           container_of(backend, struct vmw_ttm_backend, backend);
+       struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
        vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
        return 0;
 }
 
-static void vmw_ttm_clear(struct ttm_backend *backend)
+static void vmw_ttm_destroy(struct ttm_tt *ttm)
 {
-       struct vmw_ttm_backend *vmw_be =
-               container_of(backend, struct vmw_ttm_backend, backend);
-
-       vmw_be->pages = NULL;
-       vmw_be->num_pages = 0;
-}
-
-static void vmw_ttm_destroy(struct ttm_backend *backend)
-{
-       struct vmw_ttm_backend *vmw_be =
-           container_of(backend, struct vmw_ttm_backend, backend);
+       struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
+       ttm_tt_fini(ttm);
        kfree(vmw_be);
 }
 
 static struct ttm_backend_func vmw_ttm_func = {
-       .populate = vmw_ttm_populate,
-       .clear = vmw_ttm_clear,
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy,
 };
 
-struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
+struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+                                unsigned long size, uint32_t page_flags,
+                                struct page *dummy_read_page)
 {
-       struct vmw_ttm_backend *vmw_be;
+       struct vmw_ttm_tt *vmw_be;
 
        vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;
 
-       vmw_be->backend.func = &vmw_ttm_func;
+       vmw_be->ttm.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
-       return &vmw_be->backend;
+       if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+               kfree(vmw_be);
+               return NULL;
+       }
+
+       return &vmw_be->ttm;
 }
 
 int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -357,7 +336,9 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
 }
 
 struct ttm_bo_driver vmw_bo_driver = {
-       .create_ttm_backend_entry = vmw_ttm_backend_init,
+       .ttm_tt_create = &vmw_ttm_tt_create,
+       .ttm_tt_populate = &ttm_pool_populate,
+       .ttm_tt_unpopulate = &ttm_pool_unpopulate,
        .invalidate_caches = vmw_invalidate_caches,
        .init_mem_type = vmw_init_mem_type,
        .evict_flags = vmw_evict_flags,
index dff8fc7..f390f5f 100644 (file)
@@ -1064,6 +1064,21 @@ static const struct dev_pm_ops vmw_pm_ops = {
        .resume = vmw_pm_resume,
 };
 
+static const struct file_operations vmwgfx_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = vmw_unlocked_ioctl,
+       .mmap = vmw_mmap,
+       .poll = vmw_fops_poll,
+       .read = vmw_fops_read,
+       .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+       .compat_ioctl = drm_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
 static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
@@ -1088,20 +1103,7 @@ static struct drm_driver driver = {
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .postclose = vmw_postclose,
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = vmw_unlocked_ioctl,
-                .mmap = vmw_mmap,
-                .poll = vmw_fops_poll,
-                .read = vmw_fops_read,
-                .fasync = drm_fasync,
-#if defined(CONFIG_COMPAT)
-                .compat_ioctl = drm_compat_ioctl,
-#endif
-                .llseek = noop_llseek,
-       },
+       .fops = &vmwgfx_driver_fops,
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
index f94b33a..0af6ebd 100644 (file)
@@ -690,7 +690,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 
        /* XXX get the first 3 from the surface info */
        vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
-       vfbs->base.base.pitch = mode_cmd->pitch;
+       vfbs->base.base.pitches[0] = mode_cmd->pitch;
        vfbs->base.base.depth = mode_cmd->depth;
        vfbs->base.base.width = mode_cmd->width;
        vfbs->base.base.height = mode_cmd->height;
@@ -804,7 +804,7 @@ static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
        cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
        cmd->body.format.colorDepth = depth;
        cmd->body.format.reserved = 0;
-       cmd->body.bytesPerLine = framebuffer->base.pitch;
+       cmd->body.bytesPerLine = framebuffer->base.pitches[0];
        cmd->body.ptr.gmrId = framebuffer->user_handle;
        cmd->body.ptr.offset = 0;
 
@@ -1056,7 +1056,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
        }
 
        vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
-       vfbd->base.base.pitch = mode_cmd->pitch;
+       vfbd->base.base.pitches[0] = mode_cmd->pitch;
        vfbd->base.base.depth = mode_cmd->depth;
        vfbd->base.base.width = mode_cmd->width;
        vfbd->base.base.height = mode_cmd->height;
@@ -1085,7 +1085,7 @@ out_err1:
 
 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
                                                 struct drm_file *file_priv,
-                                                struct drm_mode_fb_cmd *mode_cmd)
+                                                struct drm_mode_fb_cmd2 *mode_cmd2)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -1093,8 +1093,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        struct vmw_surface *surface = NULL;
        struct vmw_dma_buffer *bo = NULL;
        struct ttm_base_object *user_obj;
+       struct drm_mode_fb_cmd mode_cmd;
        int ret;
 
+       mode_cmd.width = mode_cmd2->width;
+       mode_cmd.height = mode_cmd2->height;
+       mode_cmd.pitch = mode_cmd2->pitches[0];
+       mode_cmd.handle = mode_cmd2->handles[0];
+       drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
+                                   &mode_cmd.bpp);
+
        /**
         * This code should be conditioned on Screen Objects not being used.
         * If screen objects are used, we can allocate a GMR to hold the
@@ -1102,8 +1110,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
         */
 
        if (!vmw_kms_validate_mode_vram(dev_priv,
-                                       mode_cmd->pitch,
-                                       mode_cmd->height)) {
+                                       mode_cmd.pitch,
+                                       mode_cmd.height)) {
                DRM_ERROR("VRAM size is too small for requested mode.\n");
                return ERR_PTR(-ENOMEM);
        }
@@ -1117,15 +1125,19 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
         * command stream using user-space handles.
         */
 
-       user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle);
+       user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
        if (unlikely(user_obj == NULL)) {
                DRM_ERROR("Could not locate requested kms frame buffer.\n");
                return ERR_PTR(-ENOENT);
        }
 
+       /**
+        * End conditioned code.
+        */
+
        /* returns either a dmabuf or surface */
        ret = vmw_user_lookup_handle(dev_priv, tfile,
-                                    mode_cmd->handle,
+                                    mode_cmd.handle,
                                     &surface, &bo);
        if (ret)
                goto err_out;
@@ -1133,10 +1145,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        /* Create the new framebuffer depending one what we got back */
        if (bo)
                ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-                                                    mode_cmd);
+                                                    &mode_cmd);
        else if (surface)
                ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
-                                                     surface, &vfb, mode_cmd);
+                                                     surface, &vfb, &mode_cmd);
        else
                BUG();
 
@@ -1344,7 +1356,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
        cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
        cmd->body.format.colorDepth = vfb->base.depth;
        cmd->body.format.reserved = 0;
-       cmd->body.bytesPerLine = vfb->base.pitch;
+       cmd->body.bytesPerLine = vfb->base.pitches[0];
        cmd->body.ptr.gmrId = vfb->user_handle;
        cmd->body.ptr.offset = 0;
 
index e1cb855..a4f7f03 100644 (file)
@@ -29,6 +29,7 @@
 #define VMWGFX_KMS_H_
 
 #include "drmP.h"
+#include "drm_crtc_helper.h"
 #include "vmwgfx_drv.h"
 
 #define VMWGFX_NUM_DISPLAY_UNITS 8
index 8f8dbd4..f77b184 100644 (file)
@@ -95,7 +95,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
                        return 0;
                fb = entry->base.crtc.fb;
 
-               return vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+               return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
                                          fb->bits_per_pixel, fb->depth);
        }
 
@@ -103,7 +103,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
                entry = list_entry(lds->active.next, typeof(*entry), active);
                fb = entry->base.crtc.fb;
 
-               vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
+               vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
                                   fb->bits_per_pixel, fb->depth);
        }
 
index 1c7f09e..a37abb5 100644 (file)
@@ -1540,29 +1540,10 @@ out_bad_surface:
 /**
  * Buffer management.
  */
-
-static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
-                                 unsigned long num_pages)
-{
-       static size_t bo_user_size = ~0;
-
-       size_t page_array_size =
-           (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
-
-       if (unlikely(bo_user_size == ~0)) {
-               bo_user_size = glob->ttm_bo_extra_size +
-                   ttm_round_pot(sizeof(struct vmw_dma_buffer));
-       }
-
-       return bo_user_size + page_array_size;
-}
-
 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-       struct ttm_bo_global *glob = bo->glob;
 
-       ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_bo);
 }
 
@@ -1573,24 +1554,12 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    void (*bo_free) (struct ttm_buffer_object *bo))
 {
        struct ttm_bo_device *bdev = &dev_priv->bdev;
-       struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        size_t acc_size;
        int ret;
 
        BUG_ON(!bo_free);
 
-       acc_size =
-           vmw_dmabuf_acc_size(bdev->glob,
-                               (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-
-       ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
-       if (unlikely(ret != 0)) {
-               /* we must free the bo here as
-                * ttm_buffer_object_init does so as well */
-               bo_free(&vmw_bo->base);
-               return ret;
-       }
-
+       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
        memset(vmw_bo, 0, sizeof(*vmw_bo));
 
        INIT_LIST_HEAD(&vmw_bo->validate_list);
@@ -1605,9 +1574,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 {
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-       struct ttm_bo_global *glob = bo->glob;
 
-       ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_user_bo);
 }
 
index 114b99a..b8f78eb 100644 (file)
@@ -253,7 +253,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
                return;
 
        offset = psbfb->gtt->offset;
-       stride = fb->pitch;
+       stride = fb->pitches[0];
 
        switch (fb->depth) {
        case 8:
index 7b97c60..c63a327 100644 (file)
@@ -507,9 +507,9 @@ int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
        if (ret < 0)
                goto psb_intel_pipe_set_base_exit;
        start = psbfb->gtt->offset;
-       offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
 
-       REG_WRITE(dspstride, crtc->fb->pitch);
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
 
        dspcntr = REG_READ(dspcntr_reg);
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
index 3f39a37..b00761c 100644 (file)
@@ -32,6 +32,7 @@
 #include <drm/drmP.h>
 #include <drm/drm.h>
 #include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
 
 #include "psb_drv.h"
 #include "psb_intel_reg.h"
@@ -273,14 +274,17 @@ static struct fb_ops psbfb_unaccel_ops = {
  */
 static int psb_framebuffer_init(struct drm_device *dev,
                                        struct psb_framebuffer *fb,
-                                       struct drm_mode_fb_cmd *mode_cmd,
+                                       struct drm_mode_fb_cmd2 *mode_cmd,
                                        struct gtt_range *gt)
 {
+       u32 bpp, depth;
        int ret;
 
-       if (mode_cmd->pitch & 63)
+       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+       if (mode_cmd->pitches[0] & 63)
                return -EINVAL;
-       switch (mode_cmd->bpp) {
+       switch (bpp) {
        case 8:
        case 16:
        case 24:
@@ -313,7 +317,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
 
 static struct drm_framebuffer *psb_framebuffer_create
                        (struct drm_device *dev,
-                        struct drm_mode_fb_cmd *mode_cmd,
+                        struct drm_mode_fb_cmd2 *mode_cmd,
                         struct gtt_range *gt)
 {
        struct psb_framebuffer *fb;
@@ -387,27 +391,28 @@ static int psbfb_create(struct psb_fbdev *fbdev,
        struct fb_info *info;
        struct drm_framebuffer *fb;
        struct psb_framebuffer *psbfb = &fbdev->pfb;
-       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd;
        struct device *device = &dev->pdev->dev;
        int size;
        int ret;
        struct gtt_range *backing;
        int gtt_roll = 1;
+       u32 bpp, depth;
 
        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
-       mode_cmd.bpp = sizes->surface_bpp;
+       bpp = sizes->surface_bpp;
 
        /* No 24bit packed */
-       if (mode_cmd.bpp == 24)
-               mode_cmd.bpp = 32;
+       if (bpp == 24)
+               bpp = 32;
 
        /* Acceleration via the GTT requires pitch to be 4096 byte aligned 
           (ie 1024 or 2048 pixels in normal use) */
-       mode_cmd.pitch =  ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 4096);
-       mode_cmd.depth = sizes->surface_depth;
+       mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096);
+       depth = sizes->surface_depth;
 
-       size = mode_cmd.pitch * mode_cmd.height;
+       size = mode_cmd.pitches[0] * mode_cmd.height;
        size = ALIGN(size, PAGE_SIZE);
 
        /* Allocate the framebuffer in the GTT with stolen page backing */
@@ -421,10 +426,10 @@ static int psbfb_create(struct psb_fbdev *fbdev,
 
                gtt_roll = 0;   /* Don't use GTT accelerated scrolling */
 
-               mode_cmd.pitch =  ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
-               mode_cmd.depth = sizes->surface_depth;
+               mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
+               depth = sizes->surface_depth;
 
-               size = mode_cmd.pitch * mode_cmd.height;
+               size = mode_cmd.pitches[0] * mode_cmd.height;
                size = ALIGN(size, PAGE_SIZE);
 
                /* Allocate the framebuffer in the GTT with stolen page
@@ -443,6 +448,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
        }
        info->par = fbdev;
 
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
        ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
        if (ret)
                goto out_unref;
@@ -504,7 +511,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
                info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
        }
 
-       drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
                                sizes->fb_width, sizes->fb_height);
 
@@ -546,7 +553,7 @@ out_err1:
  */
 static struct drm_framebuffer *psb_user_framebuffer_create
                        (struct drm_device *dev, struct drm_file *filp,
-                        struct drm_mode_fb_cmd *cmd)
+                        struct drm_mode_fb_cmd2 *cmd)
 {
        struct gtt_range *r;
        struct drm_gem_object *obj;
@@ -555,7 +562,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
         *      Find the GEM object and thus the gtt range object that is
         *      to back this space
         */
-       obj = drm_gem_object_lookup(dev, filp, cmd->handle);
+       obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
        if (obj == NULL)
                return ERR_PTR(-ENOENT);
 
index 8eb827e..0b37b7b 100644 (file)
@@ -390,9 +390,9 @@ int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_f
                goto psb_intel_pipe_set_base_exit;
 
        start = psbfb->gtt->offset;
-       offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
 
-       REG_WRITE(dspstride, crtc->fb->pitch);
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
        dspcntr = REG_READ(dspcntr_reg);
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
 
index c9311a5..980837e 100644 (file)
@@ -543,9 +543,9 @@ int mrst_pipe_set_base(struct drm_crtc *crtc,
                return 0;
 
        start = psbfb->gtt->offset;
-       offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
 
-       REG_WRITE(dspstride, crtc->fb->pitch);
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
 
        dspcntr = REG_READ(dspcntr_reg);
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
index 986a04d..9581680 100644 (file)
@@ -1151,6 +1151,17 @@ static struct vm_operations_struct psb_gem_vm_ops = {
        .close = drm_gem_vm_close,
 };
 
+static const struct file_operations gma500_driver_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = psb_unlocked_ioctl,
+       .mmap = drm_gem_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+};
+
 static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
                           DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
@@ -1179,17 +1190,7 @@ static struct drm_driver driver = {
        .dumb_create = psb_gem_dumb_create,
        .dumb_map_offset = psb_gem_dumb_map_gtt,
        .dumb_destroy = psb_gem_dumb_destroy,
-
-       .fops = {
-                .owner = THIS_MODULE,
-                .open = drm_open,
-                .release = drm_release,
-                .unlocked_ioctl = psb_unlocked_ioctl,
-                .mmap = drm_gem_mmap,
-                .poll = drm_poll,
-                .fasync = drm_fasync,
-                .read = drm_read,
-        },
+       .fops = &gma500_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = PSB_DRM_DRIVER_DATE,
index caa9d86..8565961 100644 (file)
@@ -367,9 +367,9 @@ int psb_intel_pipe_set_base(struct drm_crtc *crtc,
                goto psb_intel_pipe_set_base_exit;
        start = psbfb->gtt->offset;
 
-       offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+       offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
 
-       REG_WRITE(dspstride, crtc->fb->pitch);
+       REG_WRITE(dspstride, crtc->fb->pitches[0]);
 
        dspcntr = REG_READ(dspcntr_reg);
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
index 284798a..19e6a20 100644 (file)
@@ -153,7 +153,7 @@ void __init xen_swiotlb_init(int verbose)
        char *m = NULL;
        unsigned int repeat = 3;
 
-       nr_tbl = swioltb_nr_tbl();
+       nr_tbl = swiotlb_nr_tbl();
        if (nr_tbl)
                xen_io_tlb_nslabs = nr_tbl;
        else {
index 3a60ac8..a5c0e10 100644 (file)
@@ -1,4 +1,5 @@
 header-y += drm.h
+header-y += drm_fourcc.h
 header-y += drm_mode.h
 header-y += drm_sarea.h
 header-y += i810_drm.h
index 4be33b4..49d94ed 100644 (file)
@@ -714,6 +714,10 @@ struct drm_get_cap {
 #define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
 #define DRM_IOCTL_MODE_MAP_DUMB    DRM_IOWR(0xB3, struct drm_mode_map_dumb)
 #define DRM_IOCTL_MODE_DESTROY_DUMB    DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
+#define DRM_IOCTL_MODE_GETPLANE        DRM_IOWR(0xB6, struct drm_mode_get_plane)
+#define DRM_IOCTL_MODE_SETPLANE        DRM_IOWR(0xB7, struct drm_mode_set_plane)
+#define DRM_IOCTL_MODE_ADDFB2          DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
 
 /**
  * Device specific ioctls should only be in their respective headers
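The new ADDFB2 ioctl takes a struct drm_mode_fb_cmd2 that describes up to four planes by handle/pitch/offset plus a fourcc pixel format, replacing the bpp/depth pair of the legacy ADDFB. A rough userspace sketch for a single-plane XRGB8888 framebuffer (DRM_FORMAT_XRGB8888 is defined further down in drm_fourcc.h; fd and handle are assumed to come from an earlier device open and buffer allocation):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>
#include <drm/drm_fourcc.h>

static int add_fb2(int fd, uint32_t width, uint32_t height,
		   uint32_t handle, uint32_t pitch, uint32_t *fb_id)
{
	struct drm_mode_fb_cmd2 f;

	memset(&f, 0, sizeof(f));
	f.width = width;
	f.height = height;
	f.pixel_format = DRM_FORMAT_XRGB8888;	/* fourcc replaces bpp/depth */
	f.handles[0] = handle;			/* one plane for packed RGB */
	f.pitches[0] = pitch;
	f.offsets[0] = 0;

	if (ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &f))
		return -1;

	*fb_id = f.fb_id;
	return 0;
}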
index e8acca8..76caa67 100644 (file)
@@ -918,7 +918,7 @@ struct drm_driver {
        int dev_priv_size;
        struct drm_ioctl_desc *ioctls;
        int num_ioctls;
-       struct file_operations fops;
+       const struct file_operations *fops;
        union {
                struct pci_driver *pci;
                struct platform_device *platform_device;
@@ -1696,5 +1696,13 @@ extern void drm_platform_exit(struct drm_driver *driver, struct platform_device
 extern int drm_get_platform_dev(struct platform_device *pdev,
                                struct drm_driver *driver);
 
+/* returns true if currently okay to sleep */
+static __inline__ bool drm_can_sleep(void)
+{
+       if (in_atomic() || in_dbg_master() || irqs_disabled())
+               return false;
+       return true;
+}
+
 #endif                         /* __KERNEL__ */
 #endif
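drm_can_sleep() above gives register-poll loops a single test for whether blocking is allowed; a typical, illustrative use (struct example_hw and hw_idle() are placeholders, not a real API):

#include <linux/delay.h>
#include "drmP.h"

struct example_hw;			/* placeholder driver state */
bool hw_idle(struct example_hw *hw);	/* placeholder status check */

static void example_wait_for_idle(struct example_hw *hw, unsigned int tries)
{
	while (tries-- && !hw_idle(hw)) {
		if (drm_can_sleep())
			msleep(1);	/* process context: ok to schedule */
		else
			mdelay(1);	/* atomic/irqs-off/kdb: busy-wait */
	}
}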
index 8020798..63e4fce 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/idr.h>
-
 #include <linux/fb.h>
 
+#include <drm/drm_fourcc.h>
+
 struct drm_device;
 struct drm_mode_set;
 struct drm_framebuffer;
@@ -44,6 +45,7 @@ struct drm_framebuffer;
 #define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
 #define DRM_MODE_OBJECT_FB 0xfbfbfbfb
 #define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
 
 struct drm_mode_object {
        uint32_t id;
@@ -118,7 +120,6 @@ struct drm_display_mode {
 
        char name[DRM_DISPLAY_MODE_LEN];
 
-       int connector_count;
        enum drm_mode_status status;
        int type;
 
@@ -238,13 +239,15 @@ struct drm_framebuffer {
        struct list_head head;
        struct drm_mode_object base;
        const struct drm_framebuffer_funcs *funcs;
-       unsigned int pitch;
+       unsigned int pitches[4];
+       unsigned int offsets[4];
        unsigned int width;
        unsigned int height;
        /* depth can be 15 or 16 */
        unsigned int depth;
        int bits_per_pixel;
        int flags;
+       uint32_t pixel_format; /* fourcc format */
        struct list_head filp_head;
        /* if you are using the helper */
        void *helper_private;
@@ -278,6 +281,7 @@ struct drm_crtc;
 struct drm_connector;
 struct drm_encoder;
 struct drm_pending_vblank_event;
+struct drm_plane;
 
 /**
  * drm_crtc_funcs - control CRTCs for a given device
@@ -341,10 +345,21 @@ struct drm_crtc_funcs {
 
 /**
  * drm_crtc - central CRTC control structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object for ID tracking etc.
  * @enabled: is this CRTC enabled?
+ * @mode: current mode timings
+ * @hwmode: mode timings as programmed to hw regs
  * @x: x position on screen
  * @y: y position on screen
  * @funcs: CRTC control functions
+ * @gamma_size: size of gamma ramp
+ * @gamma_store: gamma ramp values
+ * @framedur_ns: precise frame timing
+ * @linedur_ns: precise line timing
+ * @pixeldur_ns: precise pixel timing
+ * @helper_private: mid-layer private data
  *
  * Each CRTC may have one or more connectors associated with it.  This structure
  * allows the CRTC to be controlled.
@@ -423,6 +438,13 @@ struct drm_connector_funcs {
        void (*force)(struct drm_connector *connector);
 };
 
+/**
+ * drm_encoder_funcs - encoder controls
+ * @reset: reset state (e.g. at init or resume time)
+ * @destroy: cleanup and free associated data
+ *
+ * Encoders sit between CRTCs and connectors.
+ */
 struct drm_encoder_funcs {
        void (*reset)(struct drm_encoder *encoder);
        void (*destroy)(struct drm_encoder *encoder);
@@ -435,6 +457,18 @@ struct drm_encoder_funcs {
 
 /**
  * drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
+ * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
+ * @possible_crtcs: bitmask of potential CRTC bindings
+ * @possible_clones: bitmask of potential sibling encoders for cloning
+ * @crtc: currently bound CRTC
+ * @funcs: control functions
+ * @helper_private: mid-layer private data
+ *
+ * CRTCs drive pixels to encoders, which convert them into signals
+ * appropriate for a given connector or set of connectors.
  */
 struct drm_encoder {
        struct drm_device *dev;
@@ -470,14 +504,37 @@ enum drm_connector_force {
 
 /**
  * drm_connector - central DRM connector control structure
- * @crtc: CRTC this connector is currently connected to, NULL if none
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
+ * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
  * @interlace_allowed: can this connector handle interlaced modes?
  * @doublescan_allowed: can this connector handle doublescan?
- * @available_modes: modes available on this connector (from get_modes() + user)
- * @initial_x: initial x position for this connector
- * @initial_y: initial y position for this connector
- * @status: connector connected?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, disconnected or unknown)
+ * @probed_modes: list of modes derived directly from the display
+ * @display_info: information about attached display (e.g. from EDID)
  * @funcs: connector control functions
+ * @user_modes: user added mode list
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @property_ids: property tracking for this connector
+ * @property_values: value pointers or data for properties
+ * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
+ * @force: a %DRM_FORCE_<foo> state for forced mode sets
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+ * @dvi_dual: dual link DVI, if found
+ * @max_tmds_clock: max clock rate, if found
+ * @latency_present: AV delay info from ELD, if found
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
  *
  * Each connector may be connected to one or more CRTCs, or may be clonable by
  * another connector if they can share a CRTC.  Each connector also has a specific
@@ -498,7 +555,6 @@ struct drm_connector {
        bool doublescan_allowed;
        struct list_head modes; /* list of modes on this connector */
 
-       int initial_x, initial_y;
        enum drm_connector_status status;
 
        /* these are modes added by probing with DDC or the BIOS */
@@ -522,7 +578,6 @@ struct drm_connector {
        /* forced on connector */
        enum drm_connector_force force;
        uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
-       uint32_t force_encoder_id;
        struct drm_encoder *encoder; /* currently active encoder */
 
        /* EDID bits */
@@ -536,7 +591,71 @@ struct drm_connector {
 };
 
 /**
- * struct drm_mode_set
+ * drm_plane_funcs - driver plane control functions
+ * @update_plane: update the plane configuration
+ * @disable_plane: shut down the plane
+ * @destroy: clean up plane resources
+ */
+struct drm_plane_funcs {
+       int (*update_plane)(struct drm_plane *plane,
+                           struct drm_crtc *crtc, struct drm_framebuffer *fb,
+                           int crtc_x, int crtc_y,
+                           unsigned int crtc_w, unsigned int crtc_h,
+                           uint32_t src_x, uint32_t src_y,
+                           uint32_t src_w, uint32_t src_h);
+       int (*disable_plane)(struct drm_plane *plane);
+       void (*destroy)(struct drm_plane *plane);
+};
+
+/**
+ * drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @base: base mode object
+ * @possible_crtcs: pipes this plane can be bound to
+ * @format_types: array of formats supported by this plane
+ * @format_count: number of formats supported
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
+ * @gamma_size: size of gamma table
+ * @gamma_store: gamma correction table
+ * @enabled: enabled flag
+ * @funcs: helper functions
+ * @helper_private: mid-layer private data
+ */
+struct drm_plane {
+       struct drm_device *dev;
+       struct list_head head;
+
+       struct drm_mode_object base;
+
+       uint32_t possible_crtcs;
+       uint32_t *format_types;
+       uint32_t format_count;
+
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+
+       /* CRTC gamma size for reporting to userspace */
+       uint32_t gamma_size;
+       uint16_t *gamma_store;
+
+       bool enabled;
+
+       const struct drm_plane_funcs *funcs;
+       void *helper_private;
+};
+
+/**
+ * drm_mode_set - new values for a CRTC config change
+ * @head: list management
+ * @fb: framebuffer to use for new config
+ * @crtc: CRTC whose configuration we're about to change
+ * @mode: mode timings to use
+ * @x: position of this CRTC relative to @fb
+ * @y: position of this CRTC relative to @fb
+ * @connectors: array of connectors to drive with this CRTC if possible
+ * @num_connectors: size of @connectors array
  *
 * Represents a single CRTC: the connectors it drives, the mode to use,
 * and the framebuffer it scans out from.
@@ -558,13 +677,33 @@ struct drm_mode_set {
 };
 
 /**
- * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
+ * struct drm_mode_config_funcs - basic driver provided mode setting functions
+ * @fb_create: create a new framebuffer object
+ * @output_poll_changed: function to handle output configuration changes
+ *
+ * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
+ * involve drivers.
  */
 struct drm_mode_config_funcs {
-       struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd);
+       struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+                                            struct drm_file *file_priv,
+                                            struct drm_mode_fb_cmd2 *mode_cmd);
        void (*output_poll_changed)(struct drm_device *dev);
 };
 
+/**
+ * drm_mode_group - group of mode setting resources for potential sub-grouping
+ * @num_crtcs: CRTC count
+ * @num_encoders: encoder count
+ * @num_connectors: connector count
+ * @id_list: list of KMS object IDs in this group
+ *
+ * Currently this simply tracks the global mode setting state.  But in the
+ * future it could allow groups of objects to be set aside into independent
+ * control groups for use by different user level processes (e.g. two X servers
+ * running simultaneously on different heads, each with their own mode
+ * configuration and freedom of mode setting).
+ */
 struct drm_mode_group {
        uint32_t num_crtcs;
        uint32_t num_encoders;
@@ -576,7 +715,30 @@ struct drm_mode_group {
 
 /**
  * drm_mode_config - Mode configuration control structure
+ * @mutex: mutex protecting KMS related lists and structures
+ * @idr_mutex: mutex for KMS ID allocation and management
+ * @crtc_idr: main KMS ID tracking object
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_connector: number of connectors on this device
+ * @connector_list: list of connector objects
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling status for this device
+ * @output_poll_work: delayed work for polling in process context
+ * @*_property: core property tracking
  *
+ * Core mode resource tracking structure.  All CRTCs, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties.  Some
+ * global restrictions are also here, e.g. dimension restrictions.
  */
 struct drm_mode_config {
        struct mutex mutex; /* protects configuration (mode lists etc.) */
@@ -589,6 +751,8 @@ struct drm_mode_config {
        struct list_head connector_list;
        int num_encoder;
        struct list_head encoder_list;
+       int num_plane;
+       struct list_head plane_list;
 
        int num_crtc;
        struct list_head crtc_list;
@@ -641,6 +805,7 @@ struct drm_mode_config {
 #define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
 #define obj_to_property(x) container_of(x, struct drm_property, base)
 #define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
+#define obj_to_plane(x) container_of(x, struct drm_plane, base)
 
 
 extern void drm_crtc_init(struct drm_device *dev,
@@ -660,6 +825,14 @@ extern void drm_encoder_init(struct drm_device *dev,
                             const struct drm_encoder_funcs *funcs,
                             int encoder_type);
 
+extern int drm_plane_init(struct drm_device *dev,
+                         struct drm_plane *plane,
+                         unsigned long possible_crtcs,
+                         const struct drm_plane_funcs *funcs,
+                         const uint32_t *formats, uint32_t format_count,
+                         bool priv);
+extern void drm_plane_cleanup(struct drm_plane *plane);
+
 extern void drm_encoder_cleanup(struct drm_encoder *encoder);
 
 extern char *drm_get_connector_name(struct drm_connector *connector);
@@ -753,17 +926,25 @@ extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
 /* IOCTLs */
 extern int drm_mode_getresources(struct drm_device *dev,
                                 void *data, struct drm_file *file_priv);
-
+extern int drm_mode_getplane_res(struct drm_device *dev, void *data,
+                                  struct drm_file *file_priv);
 extern int drm_mode_getcrtc(struct drm_device *dev,
                            void *data, struct drm_file *file_priv);
 extern int drm_mode_getconnector(struct drm_device *dev,
                              void *data, struct drm_file *file_priv);
 extern int drm_mode_setcrtc(struct drm_device *dev,
                            void *data, struct drm_file *file_priv);
+extern int drm_mode_getplane(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv);
+extern int drm_mode_setplane(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv);
 extern int drm_mode_cursor_ioctl(struct drm_device *dev,
                                void *data, struct drm_file *file_priv);
 extern int drm_mode_addfb(struct drm_device *dev,
                          void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb2(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv);
+extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
 extern int drm_mode_rmfb(struct drm_device *dev,
                         void *data, struct drm_file *file_priv);
 extern int drm_mode_getfb(struct drm_device *dev,
@@ -824,4 +1005,7 @@ extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
                                    void *data, struct drm_file *file_priv);
 extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
                                      void *data, struct drm_file *file_priv);
+
+extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+                                int *bpp);
 #endif /* __DRM_CRTC_H__ */
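Taken together, the plane hooks added above are used roughly as in the following driver-side sketch (all foo_ names and the format list are illustrative; error handling trimmed to the essentials):

#include <linux/slab.h>
#include "drmP.h"
#include "drm_crtc.h"
#include <drm/drm_fourcc.h>

/* formats this hypothetical overlay can scan out */
static const uint32_t foo_plane_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XRGB1555,
};

static int foo_update_plane(struct drm_plane *plane,
			    struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int crtc_x, int crtc_y,
			    unsigned int crtc_w, unsigned int crtc_h,
			    uint32_t src_x, uint32_t src_y,
			    uint32_t src_w, uint32_t src_h)
{
	/* program the overlay from fb->pixel_format / fb->pitches[0], the
	 * crtc_* destination rectangle and the 16.16 fixed-point src_* box */
	return 0;
}

static int foo_disable_plane(struct drm_plane *plane)
{
	/* turn the overlay off */
	return 0;
}

static void foo_destroy_plane(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane = foo_update_plane,
	.disable_plane = foo_disable_plane,
	.destroy = foo_destroy_plane,
};

int foo_plane_create(struct drm_device *dev, unsigned long possible_crtcs)
{
	struct drm_plane *plane;
	int ret;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return -ENOMEM;

	/* priv == false: expose the plane via GETPLANERESOURCES */
	ret = drm_plane_init(dev, plane, possible_crtcs, &foo_plane_funcs,
			     foo_plane_formats, ARRAY_SIZE(foo_plane_formats),
			     false);
	if (ret)
		kfree(plane);
	return ret;
}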
index 73b0712..37515d1 100644 (file)
@@ -117,7 +117,7 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
 extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-                                         struct drm_mode_fb_cmd *mode_cmd);
+                                         struct drm_mode_fb_cmd2 *mode_cmd);
 
 static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
                                       const struct drm_crtc_helper_funcs *funcs)
@@ -144,4 +144,7 @@ extern void drm_helper_hpd_irq_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+
+extern int drm_format_num_planes(uint32_t format);
+
 #endif
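
drm_helper_mode_fill_fb_struct() now consumes the new drm_mode_fb_cmd2, and drm_format_num_planes() reports how many buffers a given fourcc needs (1 for XRGB8888, 2 for NV12, 3 for YUV420). A small, hedged sketch of the kind of validation a driver's fb-create path can do with it; my_check_fb_cmd2() is purely illustrative:

#include "drm_crtc_helper.h"

/* Reject an addfb2 request that is missing a handle or pitch for one of
 * the planes its pixel format requires. */
static int my_check_fb_cmd2(const struct drm_mode_fb_cmd2 *cmd)
{
	int i, num_planes = drm_format_num_planes(cmd->pixel_format);

	for (i = 0; i < num_planes; i++)
		if (!cmd->handles[i] || !cmd->pitches[i])
			return -EINVAL;		/* e.g. NV12 needs planes 0 and 1 */
	return 0;
}
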
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
new file mode 100644 (file)
index 0000000..bdf0152
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_FOURCC_H
+#define DRM_FOURCC_H
+
+#include <linux/types.h>
+
+#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
+                                ((__u32)(c) << 16) | ((__u32)(d) << 24))
+
+#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+
+/* color index */
+#define DRM_FORMAT_C8          fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
+
+/* 8 bpp RGB */
+#define DRM_FORMAT_RGB332      fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
+#define DRM_FORMAT_BGR233      fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
+
+/* 16 bpp RGB */
+#define DRM_FORMAT_XRGB4444    fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_XBGR4444    fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBX4444    fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRX4444    fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
+
+#define DRM_FORMAT_ARGB4444    fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_ABGR4444    fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBA4444    fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRA4444    fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
+
+#define DRM_FORMAT_XRGB1555    fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_XBGR1555    fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBX5551    fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRX5551    fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
+
+#define DRM_FORMAT_ARGB1555    fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_ABGR1555    fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBA5551    fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRA5551    fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
+
+#define DRM_FORMAT_RGB565      fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
+#define DRM_FORMAT_BGR565      fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
+
+/* 24 bpp RGB */
+#define DRM_FORMAT_RGB888      fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
+#define DRM_FORMAT_BGR888      fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
+
+/* 32 bpp RGB */
+#define DRM_FORMAT_XRGB8888    fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_XBGR8888    fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBX8888    fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRX8888    fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
+
+#define DRM_FORMAT_ARGB8888    fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_ABGR8888    fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBA8888    fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRA8888    fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
+
+#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
+
+#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+
+/* packed YCbCr */
+#define DRM_FORMAT_YUYV                fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_YVYU                fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_UYVY                fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
+#define DRM_FORMAT_VYUY                fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
+
+#define DRM_FORMAT_AYUV                fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+#define DRM_FORMAT_NV12                fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV21                fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV16                fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV61                fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+
+/* 2 non contiguous plane YCbCr */
+#define DRM_FORMAT_NV12M       fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV12MT      fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
+
+/*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+#define DRM_FORMAT_YUV410      fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU410      fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV411      fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU411      fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV420      fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU420      fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV422      fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU422      fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV444      fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU444      fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+/* 3 non contiguous plane YCbCr */
+#define DRM_FORMAT_YUV420M     fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+
+#endif /* DRM_FOURCC_H */
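
fourcc_code() simply packs four ASCII characters into a little-endian __u32, so the DRM_FORMAT_* values line up with the familiar fourcc.org codes (DRM_FORMAT_NV12 is 0x3231564e, i.e. 'N','V','1','2'). A tiny standalone helper that mirrors the macro can be handy for decoding a pixel_format value seen in a failing addfb2 call:

#include <stdio.h>
#include <stdint.h>

#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

static void print_fourcc(uint32_t fmt)
{
	printf("0x%08x = %c%c%c%c\n", fmt,
	       (char)(fmt & 0xff), (char)((fmt >> 8) & 0xff),
	       (char)((fmt >> 16) & 0xff), (char)((fmt >> 24) & 0xff));
}

int main(void)
{
	print_fourcc(fourcc_code('N', 'V', '1', '2'));	/* DRM_FORMAT_NV12 */
	print_fourcc(fourcc_code('X', 'R', '2', '4'));	/* DRM_FORMAT_XRGB8888 */
	return 0;
}
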
index ddd46db..2a2acda 100644 (file)
@@ -120,11 +120,48 @@ struct drm_mode_crtc {
        struct drm_mode_modeinfo mode;
 };
 
-#define DRM_MODE_ENCODER_NONE   0
-#define DRM_MODE_ENCODER_DAC    1
-#define DRM_MODE_ENCODER_TMDS   2
-#define DRM_MODE_ENCODER_LVDS   3
-#define DRM_MODE_ENCODER_TVDAC  4
+#define DRM_MODE_PRESENT_TOP_FIELD     (1<<0)
+#define DRM_MODE_PRESENT_BOTTOM_FIELD  (1<<1)
+
+/* Planes blend with or override other bits on the CRTC */
+struct drm_mode_set_plane {
+       __u32 plane_id;
+       __u32 crtc_id;
+       __u32 fb_id; /* fb object contains surface format type */
+       __u32 flags; /* see above flags */
+
+       /* Signed dest location allows it to be partially off screen */
+       __s32 crtc_x, crtc_y;
+       __u32 crtc_w, crtc_h;
+
+       /* Source values are 16.16 fixed point */
+       __u32 src_x, src_y;
+       __u32 src_h, src_w;
+};
+
+struct drm_mode_get_plane {
+       __u32 plane_id;
+
+       __u32 crtc_id;
+       __u32 fb_id;
+
+       __u32 possible_crtcs;
+       __u32 gamma_size;
+
+       __u32 count_format_types;
+       __u64 format_type_ptr;
+};
+
+struct drm_mode_get_plane_res {
+       __u64 plane_id_ptr;
+       __u32 count_planes;
+};
+
+#define DRM_MODE_ENCODER_NONE  0
+#define DRM_MODE_ENCODER_DAC   1
+#define DRM_MODE_ENCODER_TMDS  2
+#define DRM_MODE_ENCODER_LVDS  3
+#define DRM_MODE_ENCODER_TVDAC 4
 #define DRM_MODE_ENCODER_VIRTUAL 5
 
 struct drm_mode_get_encoder {
@@ -231,6 +268,33 @@ struct drm_mode_fb_cmd {
        __u32 handle;
 };
 
+#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+
+struct drm_mode_fb_cmd2 {
+       __u32 fb_id;
+       __u32 width, height;
+       __u32 pixel_format; /* fourcc code from drm_fourcc.h */
+       __u32 flags; /* see above flags */
+
+       /*
+        * In case of planar formats, this ioctl allows up to 4
+        * buffer objects with offsets and pitches per plane.
+        * The pitch and offset order is dictated by the fourcc,
+        * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
+        *
+        *   YUV 4:2:0 image with a plane of 8 bit Y samples
+        *   followed by an interleaved U/V plane containing
+        *   8 bit 2x2 subsampled colour difference samples.
+        *
+        * So it would consist of Y as offsets[0] and UV as
+        * offsets[1].  Note that offsets[0] will generally
+        * be 0.
+        */
+       __u32 handles[4];
+       __u32 pitches[4]; /* pitch for each plane */
+       __u32 offsets[4]; /* offset of each plane */
+};
+
 #define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
 #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
 #define DRM_MODE_FB_DIRTY_FLAGS         0x03
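
struct drm_mode_fb_cmd2 and struct drm_mode_set_plane are the userspace-visible half of the plane work: addfb2 attaches per-plane handles/pitches/offsets to a fourcc, and setplane positions the result using 16.16 fixed-point source coordinates. The sketch below puts an NV12 buffer (Y plane followed by the interleaved CbCr plane in one GEM object) on an overlay; the DRM_IOCTL_MODE_ADDFB2 and DRM_IOCTL_MODE_SETPLANE request numbers live in drm.h elsewhere in this series, the header paths assume a libdrm install, and fd/crtc_id/plane_id/handle are assumed to exist already.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>
#include <drm/drm_fourcc.h>

static int show_nv12(int fd, uint32_t crtc_id, uint32_t plane_id,
		     uint32_t handle, uint32_t w, uint32_t h)
{
	struct drm_mode_fb_cmd2 fb;
	struct drm_mode_set_plane sp;

	memset(&fb, 0, sizeof(fb));
	fb.width  = w;
	fb.height = h;
	fb.pixel_format = DRM_FORMAT_NV12;
	fb.handles[0] = handle;		/* Y plane */
	fb.pitches[0] = w;
	fb.offsets[0] = 0;
	fb.handles[1] = handle;		/* CbCr plane in the same BO */
	fb.pitches[1] = w;
	fb.offsets[1] = w * h;
	if (ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &fb))
		return -1;

	memset(&sp, 0, sizeof(sp));
	sp.plane_id = plane_id;
	sp.crtc_id  = crtc_id;
	sp.fb_id    = fb.fb_id;		/* filled in by the kernel */
	sp.crtc_w   = w;		/* destination rectangle at 0,0 */
	sp.crtc_h   = h;
	sp.src_w    = w << 16;		/* source rectangle, 16.16 fixed point */
	sp.src_h    = h << 16;
	return ioctl(fd, DRM_IOCTL_MODE_SETPLANE, &sp);
}
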
diff --git a/include/drm/drm_sman.h b/include/drm/drm_sman.h
deleted file mode 100644 (file)
index 08ecf83..0000000
+++ /dev/null
@@ -1,176 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple memory MANager interface that keeps track on allocate regions on a
- * per "owner" basis. All regions associated with an "owner" can be released
- * with a simple call. Typically if the "owner" exists. The owner is any
- * "unsigned long" identifier. Can typically be a pointer to a file private
- * struct or a context identifier.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#ifndef DRM_SMAN_H
-#define DRM_SMAN_H
-
-#include "drmP.h"
-#include "drm_hashtab.h"
-
-/*
- * A class that is an abstration of a simple memory allocator.
- * The sman implementation provides a default such allocator
- * using the drm_mm.c implementation. But the user can replace it.
- * See the SiS implementation, which may use the SiS FB kernel module
- * for memory management.
- */
-
-struct drm_sman_mm {
-       /* private info. If allocated, needs to be destroyed by the destroy
-          function */
-       void *private;
-
-       /* Allocate a memory block with given size and alignment.
-          Return an opaque reference to the memory block */
-
-       void *(*allocate) (void *private, unsigned long size,
-                          unsigned alignment);
-
-       /* Free a memory block. "ref" is the opaque reference that we got from
-          the "alloc" function */
-
-       void (*free) (void *private, void *ref);
-
-       /* Free all resources associated with this allocator */
-
-       void (*destroy) (void *private);
-
-       /* Return a memory offset from the opaque reference returned from the
-          "alloc" function */
-
-       unsigned long (*offset) (void *private, void *ref);
-};
-
-struct drm_memblock_item {
-       struct list_head owner_list;
-       struct drm_hash_item user_hash;
-       void *mm_info;
-       struct drm_sman_mm *mm;
-       struct drm_sman *sman;
-};
-
-struct drm_sman {
-       struct drm_sman_mm *mm;
-       int num_managers;
-       struct drm_open_hash owner_hash_tab;
-       struct drm_open_hash user_hash_tab;
-       struct list_head owner_items;
-};
-
-/*
- * Take down a memory manager. This function should only be called after a
- * successful init and after a call to drm_sman_cleanup.
- */
-
-extern void drm_sman_takedown(struct drm_sman * sman);
-
-/*
- * Allocate structures for a manager.
- * num_managers are the number of memory pools to manage. (VRAM, AGP, ....)
- * user_order is the log2 of the number of buckets in the user hash table.
- *         set this to approximately log2 of the max number of memory regions
- *         that will be allocated for _all_ pools together.
- * owner_order is the log2 of the number of buckets in the owner hash table.
- *         set this to approximately log2 of
- *         the number of client file connections that will
- *         be using the manager.
- *
- */
-
-extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
-                        unsigned int user_order, unsigned int owner_order);
-
-/*
- * Initialize a drm_mm.c allocator. Should be called only once for each
- * manager unless a customized allogator is used.
- */
-
-extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
-                             unsigned long start, unsigned long size);
-
-/*
- * Initialize a customized allocator for one of the managers.
- * (See the SiS module). The object pointed to by "allocator" is copied,
- * so it can be destroyed after this call.
- */
-
-extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger,
-                               struct drm_sman_mm * allocator);
-
-/*
- * Allocate a memory block. Aligment is not implemented yet.
- */
-
-extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
-                                               unsigned int manager,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long owner);
-/*
- * Free a memory block identified by its user hash key.
- */
-
-extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
-
-/*
- * returns 1 iff there are no stale memory blocks associated with this owner.
- * Typically called to determine if we need to idle the hardware and call
- * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
- * resources associated with owner.
- */
-
-extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
-
-/*
- * Frees all stale memory blocks associated with this owner. Note that this
- * requires that the hardware is finished with all blocks, so the graphics engine
- * should be idled before this call is made. This function also frees
- * any resources associated with "owner" and should be called when owner
- * is not going to be referenced anymore.
- */
-
-extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
-
-/*
- * Frees all stale memory blocks associated with the memory manager.
- * See idling above.
- */
-
-extern void drm_sman_cleanup(struct drm_sman * sman);
-
-#endif
index 1205043..5e120f1 100644 (file)
@@ -74,9 +74,16 @@ struct drm_exynos_gem_mmap {
        uint64_t mapped;
 };
 
+struct drm_exynos_plane_set_zpos {
+       __u32 plane_id;
+       __s32 zpos;
+};
+
 #define DRM_EXYNOS_GEM_CREATE          0x00
 #define DRM_EXYNOS_GEM_MAP_OFFSET      0x01
 #define DRM_EXYNOS_GEM_MMAP            0x02
+/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
+#define DRM_EXYNOS_PLANE_SET_ZPOS      0x06
 
 #define DRM_IOCTL_EXYNOS_GEM_CREATE            DRM_IOWR(DRM_COMMAND_BASE + \
                DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -87,6 +94,9 @@ struct drm_exynos_gem_mmap {
 #define DRM_IOCTL_EXYNOS_GEM_MMAP      DRM_IOWR(DRM_COMMAND_BASE + \
                DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
 
+#define DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS        DRM_IOWR(DRM_COMMAND_BASE + \
+               DRM_EXYNOS_PLANE_SET_ZPOS, struct drm_exynos_plane_set_zpos)
+
 /**
  * Platform Specific Structure for DRM based FIMD.
  *
@@ -102,4 +112,31 @@ struct exynos_drm_fimd_pdata {
        unsigned int                    bpp;
 };
 
+/**
+ * Platform Specific Structure for DRM based HDMI.
+ *
+ * @hdmi_dev: device pointer to the hdmi-specific driver.
+ * @mixer_dev: device pointer to the mixer-specific driver.
+ *
+ * This structure is used by the common hdmi driver; each device pointer
+ * is used to access the corresponding sub-driver (hdmi or mixer).
+ */
+struct exynos_drm_common_hdmi_pd {
+       struct device *hdmi_dev;
+       struct device *mixer_dev;
+};
+
+/**
+ * Platform Specific Structure for DRM based HDMI core.
+ *
+ * @timing: default video mode for initializing
+ * @default_win: default window layer number to be used for UI.
+ * @bpp: default bits per pixel.
+ */
+struct exynos_drm_hdmi_pdata {
+       struct fb_videomode             timing;
+       unsigned int                    default_win;
+       unsigned int                    bpp;
+};
+
 #endif
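
The new DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS request takes the plane_id handed out by the generic plane ioctls and a signed stacking position for that overlay window. A minimal, hedged userspace call; fd and plane_id are assumed to be valid, the header path assumes an installed exynos_drm.h, and the meaning of a particular zpos value is driver-specific:

#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/exynos_drm.h>

static int exynos_plane_set_zpos(int fd, unsigned int plane_id, int zpos)
{
	struct drm_exynos_plane_set_zpos arg = {
		.plane_id = plane_id,
		.zpos = zpos,		/* window stacking position */
	};

	return ioctl(fd, DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS, &arg);
}
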
diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h
new file mode 100644 (file)
index 0000000..1136867
--- /dev/null
@@ -0,0 +1,91 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics Inc.  Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRM_H_
+#define _PSB_DRM_H_
+
+/*
+ *     Manage the LUT for an output
+ */
+struct drm_psb_dpst_lut_arg {
+       uint8_t lut[256];
+       int output_id;
+};
+
+/*
+ *     Validate modes
+ */
+struct drm_psb_mode_operation_arg {
+       u32 obj_id;
+       u16 operation;
+       struct drm_mode_modeinfo mode;
+       u64 data;
+};
+
+/*
+ *     Query the stolen memory for smarter management of
+ *     memory by the server
+ */
+struct drm_psb_stolen_memory_arg {
+       u32 base;
+       u32 size;
+};
+
+struct drm_psb_get_pipe_from_crtc_id_arg {
+       /** ID of CRTC being requested **/
+       u32 crtc_id;
+       /** pipe of requested CRTC **/
+       u32 pipe;
+};
+
+struct drm_psb_gem_create {
+       __u64 size;
+       __u32 handle;
+       __u32 flags;
+#define GMA_GEM_CREATE_STOLEN          1       /* Stolen memory can be used */
+};
+
+struct drm_psb_gem_mmap {
+       __u32 handle;
+       __u32 pad;
+       /**
+        * Fake offset to use for subsequent mmap call
+        *
+        * This is a fixed-size type for 32/64 compatibility.
+        */
+       __u64 offset;
+};
+
+/* Controlling the kernel modesetting buffers */
+
+#define DRM_GMA_GEM_CREATE     0x00            /* Create a GEM object */
+#define DRM_GMA_GEM_MMAP       0x01            /* Map GEM memory */
+#define DRM_GMA_STOLEN_MEMORY  0x02            /* Report stolen memory */
+#define DRM_GMA_2D_OP          0x03            /* Will be merged later */
+#define DRM_GMA_GAMMA          0x04            /* Set gamma table */
+#define DRM_GMA_ADB            0x05            /* Get backlight */
+#define DRM_GMA_DPST_BL                0x06            /* Set backlight */
+#define DRM_GMA_GET_PIPE_FROM_CRTC_ID 0x1      /* CRTC to physical pipe# */
+#define DRM_GMA_MODE_OPERATION 0x07            /* Mode validation/DC set */
+#define        PSB_MODE_OPERATION_MODE_VALID   0x01
+
+
+#endif
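
gma500's GEM interface follows the usual create/mmap split: DRM_GMA_GEM_CREATE returns a handle (optionally backed by stolen memory), and DRM_GMA_GEM_MMAP returns the fake offset to pass to mmap(). The DRM_IOCTL_GMA_* convenience macros live in the driver rather than in this header, so the sketch below builds the request numbers by hand from DRM_COMMAND_BASE; the header path and the behaviour when stolen memory is unavailable are assumptions, not guarantees.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/gma_drm.h>

#define MY_IOCTL_GMA_GEM_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_GMA_GEM_CREATE, struct drm_psb_gem_create)
#define MY_IOCTL_GMA_GEM_MMAP \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_GMA_GEM_MMAP, struct drm_psb_gem_mmap)

static void *gma_gem_alloc_map(int fd, uint64_t size)
{
	struct drm_psb_gem_create create;
	struct drm_psb_gem_mmap map;

	memset(&create, 0, sizeof(create));
	create.size = size;
	create.flags = GMA_GEM_CREATE_STOLEN;	/* 0 requests ordinary pages */
	if (ioctl(fd, MY_IOCTL_GMA_GEM_CREATE, &create))
		return NULL;

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	if (ioctl(fd, MY_IOCTL_GMA_GEM_MMAP, &map))
		return NULL;

	/* map.offset is the fake offset for the GEM mmap path */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}
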
index 28c0d11..924f6a4 100644 (file)
@@ -198,6 +198,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_OVERLAY_PUT_IMAGE     0x27
 #define DRM_I915_OVERLAY_ATTRS 0x28
 #define DRM_I915_GEM_EXECBUFFER2       0x29
+#define DRM_I915_GET_SPRITE_COLORKEY   0x2a
+#define DRM_I915_SET_SPRITE_COLORKEY   0x2b
 
 #define DRM_IOCTL_I915_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -239,6 +241,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_MADVISE     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
 #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE       DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
 #define DRM_IOCTL_I915_OVERLAY_ATTRS   DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -291,6 +295,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_COHERENT_RINGS   13
 #define I915_PARAM_HAS_EXEC_CONSTANTS   14
 #define I915_PARAM_HAS_RELAXED_DELTA    15
+#define I915_PARAM_HAS_GEN7_SOL_RESET   16
 
 typedef struct drm_i915_getparam {
        int param;
@@ -653,6 +658,9 @@ struct drm_i915_gem_execbuffer2 {
        __u64 rsvd2;
 };
 
+/** Resets the SO write offset registers for transform feedback on gen7. */
+#define I915_EXEC_GEN7_SOL_RESET       (1<<8)
+
 struct drm_i915_gem_pin {
        /** Handle of the buffer to be pinned. */
        __u32 handle;
@@ -844,4 +852,36 @@ struct drm_intel_overlay_attrs {
        __u32 gamma5;
 };
 
+/*
+ * Intel sprite handling
+ *
+ * Color keying works with a min/mask/max tuple.  Both source and destination
+ * color keying is allowed.
+ *
+ * Source keying:
+ * Sprite pixels within the min & max values, masked against the color channels
+ * specified in the mask field, will be transparent.  All other pixels will
+ * be displayed on top of the primary plane.  For RGB surfaces, only the min
+ * and mask fields will be used; ranged compares are not allowed.
+ *
+ * Destination keying:
+ * Primary plane pixels that match the min value, masked against the color
+ * channels specified in the mask field, will be replaced by corresponding
+ * pixels from the sprite plane.
+ *
+ * Note that source & destination keying are exclusive; only one can be
+ * active on a given plane.
+ */
+
+#define I915_SET_COLORKEY_NONE         (1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_DESTINATION  (1<<1)
+#define I915_SET_COLORKEY_SOURCE       (1<<2)
+struct drm_intel_sprite_colorkey {
+       __u32 plane_id;
+       __u32 min_value;
+       __u32 channel_mask;
+       __u32 max_value;
+       __u32 flags;
+};
+
 #endif                         /* _I915_DRM_H_ */
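
The colorkey rules described above map directly onto struct drm_intel_sprite_colorkey. A hedged sketch that makes a sprite source-keyed on pure black over an RGB primary; fd and the sprite's plane_id are assumed, and, as the comment notes, only min_value and channel_mask matter for RGB surfaces while source and destination keying cannot be combined:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/i915_drm.h>

static int sprite_source_key_black(int fd, unsigned int plane_id)
{
	struct drm_intel_sprite_colorkey key;

	memset(&key, 0, sizeof(key));
	key.plane_id = plane_id;
	key.min_value = 0x00000000;		/* key on pure black */
	key.channel_mask = 0x00ffffff;		/* compare the R, G and B channels */
	key.flags = I915_SET_COLORKEY_SOURCE;

	return ioctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &key);
}
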
index be94be6..b55da40 100644 (file)
@@ -509,6 +509,7 @@ typedef struct {
 #define DRM_RADEON_GEM_SET_TILING      0x28
 #define DRM_RADEON_GEM_GET_TILING      0x29
 #define DRM_RADEON_GEM_BUSY            0x2a
+#define DRM_RADEON_GEM_VA              0x2b
 
 #define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
 #define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -550,6 +551,7 @@ typedef struct {
 #define DRM_IOCTL_RADEON_GEM_SET_TILING        DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
 #define DRM_IOCTL_RADEON_GEM_GET_TILING        DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
 #define DRM_IOCTL_RADEON_GEM_BUSY      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
+#define DRM_IOCTL_RADEON_GEM_VA                DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
 
 typedef struct drm_radeon_init {
        enum {
@@ -872,12 +874,39 @@ struct drm_radeon_gem_pwrite {
        uint64_t data_ptr;
 };
 
+#define RADEON_VA_MAP                  1
+#define RADEON_VA_UNMAP                        2
+
+#define RADEON_VA_RESULT_OK            0
+#define RADEON_VA_RESULT_ERROR         1
+#define RADEON_VA_RESULT_VA_EXIST      2
+
+#define RADEON_VM_PAGE_VALID           (1 << 0)
+#define RADEON_VM_PAGE_READABLE                (1 << 1)
+#define RADEON_VM_PAGE_WRITEABLE       (1 << 2)
+#define RADEON_VM_PAGE_SYSTEM          (1 << 3)
+#define RADEON_VM_PAGE_SNOOPED         (1 << 4)
+
+struct drm_radeon_gem_va {
+       uint32_t                handle;
+       uint32_t                operation;
+       uint32_t                vm_id;
+       uint32_t                flags;
+       uint64_t                offset;
+};
+
 #define RADEON_CHUNK_ID_RELOCS 0x01
 #define RADEON_CHUNK_ID_IB     0x02
 #define RADEON_CHUNK_ID_FLAGS  0x03
 
 /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
 #define RADEON_CS_KEEP_TILING_FLAGS 0x01
+#define RADEON_CS_USE_VM            0x02
+/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
+#define RADEON_CS_RING_GFX          0
+#define RADEON_CS_RING_COMPUTE      1
+/* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
+/* 0 = normal, + = higher priority, - = lower priority */
 
 struct drm_radeon_cs_chunk {
        uint32_t                chunk_id;
@@ -885,6 +914,9 @@ struct drm_radeon_cs_chunk {
        uint64_t                chunk_data;
 };
 
+/* drm_radeon_cs_reloc.flags */
+#define RADEON_RELOC_DONT_SYNC         0x01
+
 struct drm_radeon_cs_reloc {
        uint32_t                handle;
        uint32_t                read_domains;
@@ -916,6 +948,10 @@ struct drm_radeon_cs {
 #define RADEON_INFO_NUM_TILE_PIPES     0x0b /* tile pipes for r600+ */
 #define RADEON_INFO_FUSION_GART_WORKING        0x0c /* fusion writes to GTT were broken before this */
 #define RADEON_INFO_BACKEND_MAP                0x0d /* pipe to backend map, needed by mesa */
+/* virtual address start, va < start are reserved by the kernel */
+#define RADEON_INFO_VA_START           0x0e
+/* maximum size of ib using the virtual memory cs */
+#define RADEON_INFO_IB_VM_MAX_SIZE     0x0f
 
 struct drm_radeon_info {
        uint32_t                request;
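
DRM_RADEON_GEM_VA hands part of the GPU virtual address space to userspace: the client picks an offset at or above the value reported by RADEON_INFO_VA_START and asks the kernel to map (or unmap) a BO there, with the outcome reported back through the struct as one of the RADEON_VA_RESULT_* codes. A hedged sketch of the map path; fd, bo_handle and a va_offset already checked against RADEON_INFO_VA_START are assumed:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/radeon_drm.h>

static int radeon_va_map(int fd, uint32_t bo_handle, uint64_t va_offset)
{
	struct drm_radeon_gem_va va;

	memset(&va, 0, sizeof(va));
	va.handle = bo_handle;
	va.operation = RADEON_VA_MAP;
	va.flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE;
	va.offset = va_offset;

	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_VA, &va))
		return -1;

	/* Real code should also check the RADEON_VA_RESULT_* code the
	 * kernel writes back into the struct. */
	return 0;
}
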
index 30f7b38..035b804 100644 (file)
@@ -64,4 +64,8 @@ typedef struct {
        unsigned int offset, size;
 } drm_sis_fb_t;
 
+struct sis_file_private {
+       struct list_head obj_list;
+};
+
 #endif                         /* __SIS_DRM_H__ */
index 42e3469..974c8f8 100644 (file)
@@ -122,17 +122,12 @@ struct ttm_mem_reg {
  * be mmapped by user space. Each of these bos occupy a slot in the
  * device address space, that can be used for normal vm operations.
  *
- * @ttm_bo_type_user: These are user-space memory areas that are made
- * available to the GPU by mapping the buffer pages into the GPU aperture
- * space. These buffers cannot be mmaped from the device address space.
- *
  * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
  * but they cannot be accessed from user-space. For kernel-only use.
  */
 
 enum ttm_bo_type {
        ttm_bo_type_device,
-       ttm_bo_type_user,
        ttm_bo_type_kernel
 };
 
@@ -434,9 +429,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
  * -EBUSY if the buffer is busy and no_wait is true.
  * -ERESTARTSYS if interrupted by a signal.
  */
-
 extern int
 ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
+
 /**
  * ttm_bo_synccpu_write_release:
  *
@@ -447,6 +442,22 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
 extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
 
 /**
+ * ttm_bo_acc_size
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo_size: size of the buffer object in bytes.
+ * @struct_size: size of the structure holding the buffer object data
+ *
+ * Returns size to account for a buffer object
+ */
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+                      unsigned long bo_size,
+                      unsigned struct_size);
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+                          unsigned long bo_size,
+                          unsigned struct_size);
+
+/**
  * ttm_bo_init
  *
  * @bdev: Pointer to a ttm_bo_device struct.
@@ -493,6 +504,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
                        struct file *persistent_swap_storage,
                        size_t acc_size,
                        void (*destroy) (struct ttm_buffer_object *));
+
 /**
  * ttm_bo_synccpu_object_init
  *
index 94eb143..d43e892 100644 (file)
@@ -43,36 +43,9 @@ struct ttm_backend;
 
 struct ttm_backend_func {
        /**
-        * struct ttm_backend_func member populate
-        *
-        * @backend: Pointer to a struct ttm_backend.
-        * @num_pages: Number of pages to populate.
-        * @pages: Array of pointers to ttm pages.
-        * @dummy_read_page: Page to be used instead of NULL pages in the
-        * array @pages.
-        * @dma_addrs: Array of DMA (bus) address of the ttm pages.
-        *
-        * Populate the backend with ttm pages. Depending on the backend,
-        * it may or may not copy the @pages array.
-        */
-       int (*populate) (struct ttm_backend *backend,
-                        unsigned long num_pages, struct page **pages,
-                        struct page *dummy_read_page,
-                        dma_addr_t *dma_addrs);
-       /**
-        * struct ttm_backend_func member clear
-        *
-        * @backend: Pointer to a struct ttm_backend.
-        *
-        * This is an "unpopulate" function. Release all resources
-        * allocated with populate.
-        */
-       void (*clear) (struct ttm_backend *backend);
-
-       /**
         * struct ttm_backend_func member bind
         *
-        * @backend: Pointer to a struct ttm_backend.
+        * @ttm: Pointer to a struct ttm_tt.
         * @bo_mem: Pointer to a struct ttm_mem_reg describing the
         * memory type and location for binding.
         *
@@ -80,46 +53,29 @@ struct ttm_backend_func {
         * indicated by @bo_mem. This function should be able to handle
         * differences between aperture and system page sizes.
         */
-       int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
+       int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
 
        /**
         * struct ttm_backend_func member unbind
         *
-        * @backend: Pointer to a struct ttm_backend.
+        * @ttm: Pointer to a struct ttm_tt.
         *
         * Unbind previously bound backend pages. This function should be
         * able to handle differences between aperture and system page sizes.
         */
-       int (*unbind) (struct ttm_backend *backend);
+       int (*unbind) (struct ttm_tt *ttm);
 
        /**
         * struct ttm_backend_func member destroy
         *
-        * @backend: Pointer to a struct ttm_backend.
+        * @ttm: Pointer to a struct ttm_tt.
         *
-        * Destroy the backend.
+        * Destroy the backend. This is called back from ttm_tt_destroy, so
+        * don't call ttm_tt_destroy from the callback or it will recurse.
         */
-       void (*destroy) (struct ttm_backend *backend);
-};
-
-/**
- * struct ttm_backend
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @flags: For driver use.
- * @func: Pointer to a struct ttm_backend_func that describes
- * the backend methods.
- *
- */
-
-struct ttm_backend {
-       struct ttm_bo_device *bdev;
-       uint32_t flags;
-       struct ttm_backend_func *func;
+       void (*destroy) (struct ttm_tt *ttm);
 };
 
-#define TTM_PAGE_FLAG_USER            (1 << 1)
-#define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
 #define TTM_PAGE_FLAG_WRITE           (1 << 3)
 #define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
 #define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
@@ -135,23 +91,18 @@ enum ttm_caching_state {
 /**
  * struct ttm_tt
  *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
  * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
  * pointer.
  * @pages: Array of pages backing the data.
- * @first_himem_page: Himem pages are put last in the page array, which
- * enables us to run caching attribute changes on only the first part
- * of the page array containing lomem pages. This is the index of the
- * first himem page.
- * @last_lomem_page: Index of the last lomem page in the page array.
  * @num_pages: Number of pages in the page array.
  * @bdev: Pointer to the current struct ttm_bo_device.
  * @be: Pointer to the ttm backend.
- * @tsk: The task for user ttm.
- * @start: virtual address for user ttm.
  * @swap_storage: Pointer to shmem struct file for swap storage.
  * @caching_state: The current caching state of the pages.
  * @state: The current binding state of the pages.
- * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
  *
  * This is a structure holding the pages, caching- and aperture binding
  * status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -159,16 +110,14 @@ enum ttm_caching_state {
  */
 
 struct ttm_tt {
+       struct ttm_bo_device *bdev;
+       struct ttm_backend_func *func;
        struct page *dummy_read_page;
        struct page **pages;
-       long first_himem_page;
-       long last_lomem_page;
        uint32_t page_flags;
        unsigned long num_pages;
        struct ttm_bo_global *glob;
        struct ttm_backend *be;
-       struct task_struct *tsk;
-       unsigned long start;
        struct file *swap_storage;
        enum ttm_caching_state caching_state;
        enum {
@@ -176,7 +125,23 @@ struct ttm_tt {
                tt_unbound,
                tt_unpopulated,
        } state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+       struct ttm_tt ttm;
        dma_addr_t *dma_address;
+       struct list_head pages_list;
 };
 
 #define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)        /* Fixed (on-card) PCI memory */
@@ -351,15 +316,42 @@ struct ttm_mem_type_manager {
 
 struct ttm_bo_driver {
        /**
-        * struct ttm_bo_driver member create_ttm_backend_entry
+        * ttm_tt_create
         *
-        * @bdev: The buffer object device.
+        * @bdev: pointer to a struct ttm_bo_device:
+        * @size: Size of the data needed backing.
+        * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+        * @dummy_read_page: See struct ttm_bo_device.
         *
-        * Create a driver specific struct ttm_backend.
+        * Create a struct ttm_tt to back data with system memory pages.
+        * No pages are actually allocated.
+        * Returns:
+        * NULL: Out of memory.
         */
+       struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
+                                       unsigned long size,
+                                       uint32_t page_flags,
+                                       struct page *dummy_read_page);
 
-       struct ttm_backend *(*create_ttm_backend_entry)
-        (struct ttm_bo_device *bdev);
+       /**
+        * ttm_tt_populate
+        *
+        * @ttm: The struct ttm_tt to contain the backing pages.
+        *
+        * Allocate all backing pages
+        * Returns:
+        * -ENOMEM: Out of memory.
+        */
+       int (*ttm_tt_populate)(struct ttm_tt *ttm);
+
+       /**
+        * ttm_tt_unpopulate
+        *
+        * @ttm: The struct ttm_tt to contain the backing pages.
+        *
+        * Free all backing pages
+        */
+       void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
 
        /**
         * struct ttm_bo_driver member invalidate_caches
@@ -477,9 +469,6 @@ struct ttm_bo_global_ref {
  * @dummy_read_page: Pointer to a dummy page used for mapping requests
  * of unpopulated pages.
  * @shrink: A shrink callback object used for buffer object swap.
- * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
- * used by a buffer object. This is excluding page arrays and backing pages.
- * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
  * @device_list_mutex: Mutex protecting the device list.
  * This mutex is held while traversing the device list for pm options.
  * @lru_lock: Spinlock protecting the bo subsystem lru lists.
@@ -497,8 +486,6 @@ struct ttm_bo_global {
        struct ttm_mem_global *mem_glob;
        struct page *dummy_read_page;
        struct ttm_mem_shrink shrink;
-       size_t ttm_bo_extra_size;
-       size_t ttm_bo_size;
        struct mutex device_list_mutex;
        spinlock_t lru_lock;
 
@@ -600,8 +587,9 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
 }
 
 /**
- * ttm_tt_create
+ * ttm_tt_init
  *
+ * @ttm: The struct ttm_tt.
  * @bdev: pointer to a struct ttm_bo_device:
  * @size: Size of the data needed backing.
  * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
@@ -612,28 +600,22 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
  * Returns:
  * NULL: Out of memory.
  */
-extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
-                                   unsigned long size,
-                                   uint32_t page_flags,
-                                   struct page *dummy_read_page);
+extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+                       unsigned long size, uint32_t page_flags,
+                       struct page *dummy_read_page);
+extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+                          unsigned long size, uint32_t page_flags,
+                          struct page *dummy_read_page);
 
 /**
- * ttm_tt_set_user:
+ * ttm_tt_fini
  *
- * @ttm: The struct ttm_tt to populate.
- * @tsk: A struct task_struct for which @start is a valid user-space address.
- * @start: A valid user-space address.
- * @num_pages: Size in pages of the user memory area.
+ * @ttm: the ttm_tt structure.
  *
- * Populate a struct ttm_tt with a user-space memory area after first pinning
- * the pages backing it.
- * Returns:
- * !0: Error.
+ * Free memory of ttm_tt structure
  */
-
-extern int ttm_tt_set_user(struct ttm_tt *ttm,
-                          struct task_struct *tsk,
-                          unsigned long start, unsigned long num_pages);
+extern void ttm_tt_fini(struct ttm_tt *ttm);
+extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
 
 /**
  * ttm_ttm_bind:
@@ -646,20 +628,11 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
 extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
 
 /**
- * ttm_tt_populate:
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Add backing pages to all of @ttm
- */
-extern int ttm_tt_populate(struct ttm_tt *ttm);
-
-/**
  * ttm_ttm_destroy:
  *
  * @ttm: The struct ttm_tt.
  *
- * Unbind, unpopulate and destroy a struct ttm_tt.
+ * Unbind, unpopulate and destroy common struct ttm_tt.
  */
 extern void ttm_tt_destroy(struct ttm_tt *ttm);
 
@@ -673,19 +646,13 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm);
 extern void ttm_tt_unbind(struct ttm_tt *ttm);
 
 /**
- * ttm_ttm_destroy:
+ * ttm_tt_swapin:
  *
  * @ttm: The struct ttm_tt.
- * @index: Index of the desired page.
- *
- * Return a pointer to the struct page backing @ttm at page
- * index @index. If the page is unpopulated, one will be allocated to
- * populate that index.
  *
- * Returns:
- * NULL on OOM.
+ * Swap in a previously swapped-out ttm_tt.
  */
-extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
+extern int ttm_tt_swapin(struct ttm_tt *ttm);
 
 /**
  * ttm_tt_cache_flush:
@@ -1046,17 +1013,25 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
 #include <linux/agp_backend.h>
 
 /**
- * ttm_agp_backend_init
+ * ttm_agp_tt_create
  *
  * @bdev: Pointer to a struct ttm_bo_device.
  * @bridge: The agp bridge this device is sitting on.
+ * @size: Size of the data needed backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
  *
  * Create a TTM backend that uses the indicated AGP bridge as an aperture
  * for TT memory. This function uses the linux agpgart interface to
  * bind and unbind memory backing a ttm_tt.
  */
-extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
-                                               struct agp_bridge_data *bridge);
+extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+                                       struct agp_bridge_data *bridge,
+                                       unsigned long size, uint32_t page_flags,
+                                       struct page *dummy_read_page);
+int ttm_agp_tt_populate(struct ttm_tt *ttm);
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
 #endif
 
 #endif
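
The net effect of the ttm_backend removal is that a driver now embeds a struct ttm_tt (or struct ttm_dma_tt), points ttm->func at its bind/unbind/destroy ops, and implements the three new ttm_bo_driver callbacks. Below is a hedged sketch of the minimal wiring, loosely modeled on the radeon/nouveau conversions in this merge; my_ttm_tt, my_backend_func and the omitted ttm_bo_driver members are placeholders, not real driver code.

#include <linux/slab.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

static struct ttm_backend_func my_backend_func;	/* .bind/.unbind/.destroy set elsewhere */

struct my_ttm_tt {
	struct ttm_tt ttm;
	/* driver-private state (e.g. the GART offset) would live here */
};

static struct ttm_tt *my_ttm_tt_create(struct ttm_bo_device *bdev,
				       unsigned long size, uint32_t page_flags,
				       struct page *dummy_read_page)
{
	struct my_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;
	tt->ttm.func = &my_backend_func;
	if (ttm_tt_init(&tt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return &tt->ttm;
}

static struct ttm_bo_driver my_bo_driver = {
	.ttm_tt_create		= my_ttm_tt_create,
	.ttm_tt_populate	= ttm_pool_populate,	/* generic page pool */
	.ttm_tt_unpopulate	= ttm_pool_unpopulate,
	/* ... invalidate_caches, init_mem_type, evict_flags, move, etc. ... */
};
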
index 129de12..5fe2740 100644 (file)
 #include "ttm_memory.h"
 
 /**
- * Get count number of pages from pool to pages list.
+ * Initialize pool allocator.
+ */
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+/**
+ * Free pool allocator.
+ */
+void ttm_page_alloc_fini(void);
+
+/**
+ * ttm_pool_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
  *
- * @pages: head of empty linked list where pages are filled.
- * @flags: ttm flags for page allocation.
- * @cstate: ttm caching state for the page.
- * @count: number of pages to allocate.
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * Add backing pages to all of @ttm
  */
-int ttm_get_pages(struct list_head *pages,
-                 int flags,
-                 enum ttm_caching_state cstate,
-                 unsigned count,
-                 dma_addr_t *dma_address);
+extern int ttm_pool_populate(struct ttm_tt *ttm);
+
 /**
- * Put linked list of pages to pool.
+ * ttm_pool_unpopulate:
+ *
+ * @ttm: The struct ttm_tt from which to free the backing pages.
  *
- * @pages: list of pages to free.
- * @page_count: number of pages in the list. Zero can be passed for unknown
- * count.
- * @flags: ttm flags for page allocation.
- * @cstate: ttm caching state.
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * Free all pages of @ttm
  */
-void ttm_put_pages(struct list_head *pages,
-                  unsigned page_count,
-                  int flags,
-                  enum ttm_caching_state cstate,
-                  dma_addr_t *dma_address);
+extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+
+
+#ifdef CONFIG_SWIOTLB
 /**
  * Initialize pool allocator.
  */
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+
 /**
  * Free pool allocator.
  */
-void ttm_page_alloc_fini(void);
+void ttm_dma_page_alloc_fini(void);
 
 /**
  * Output the state of pools to debugfs file
  */
-extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
+
+extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+
+#else
+static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
+                                         unsigned max_pages)
+{
+       return -ENODEV;
+}
+
+static inline void ttm_dma_page_alloc_fini(void) { return; }
+
+static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+       return 0;
+}
+#endif
+
 #endif
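
The split between the plain page pool and the new DMA-aware pool is resolved per device at populate time: when swiotlb is active the driver routes through ttm_dma_populate()/ttm_dma_unpopulate() on its embedded ttm_dma_tt, otherwise it falls back to the generic pool. A hedged sketch of that decision, roughly following the radeon/nouveau changes in this merge; my_dma_tt and the my_dev() helper are hypothetical:

#include <linux/kernel.h>
#include <linux/swiotlb.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

struct my_dma_tt {
	struct ttm_dma_tt dma;		/* dma.ttm is the base struct ttm_tt */
};

/* my_dev(): hypothetical helper returning the struct device used for DMA. */
static struct device *my_dev(struct ttm_tt *ttm);

static int my_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct my_dma_tt *tt = container_of(ttm, struct my_dma_tt, dma.ttm);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl())
		return ttm_dma_populate(&tt->dma, my_dev(ttm));
#endif
	return ttm_pool_populate(ttm);
}

static void my_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct my_dma_tt *tt = container_of(ttm, struct my_dma_tt, dma.ttm);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&tt->dma, my_dev(ttm));
		return;
	}
#endif
	ttm_pool_unpopulate(ttm);
}
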
index fd11a5b..79b3b6e 100644 (file)
@@ -274,4 +274,8 @@ typedef struct drm_via_dmablit {
        drm_via_blitsync_t sync;
 } drm_via_dmablit_t;
 
+struct via_file_private {
+       struct list_head obj_list;
+};
+
 #endif                         /* _VIA_DRM_H_ */
index 445702c..e872526 100644 (file)
@@ -24,7 +24,7 @@ extern int swiotlb_force;
 
 extern void swiotlb_init(int verbose);
 extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
-extern unsigned long swioltb_nr_tbl(void);
+extern unsigned long swiotlb_nr_tbl(void);
 
 /*
  * Enumeration for sync targets
index 99093b3..058935e 100644 (file)
@@ -110,11 +110,11 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
-unsigned long swioltb_nr_tbl(void)
+unsigned long swiotlb_nr_tbl(void)
 {
        return io_tlb_nslabs;
 }
-
+EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                                      volatile void *address)
@@ -321,6 +321,7 @@ void __init swiotlb_free(void)
                free_bootmem_late(__pa(io_tlb_start),
                                  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
        }
+       io_tlb_nslabs = 0;
 }
 
 static int is_swiotlb_buffer(phys_addr_t paddr)