Merge remote branch 'origin/master' into modesetting-101
author Dave Airlie <airlied@redhat.com>
Fri, 25 Jan 2008 05:27:53 +0000 (15:27 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 25 Jan 2008 05:27:53 +0000 (15:27 +1000)
Conflicts:

linux-core/drm_bo.c
linux-core/drm_drv.c
shared-core/drm.h
shared-core/i915_dma.c
shared-core/i915_drv.h
shared-core/i915_irq.c
shared-core/radeon_irq.c

29 files changed:
1  2 
libdrm/xf86drm.c
linux-core/ati_pcigart.c
linux-core/drmP.h
linux-core/drm_agpsupport.c
linux-core/drm_bo.c
linux-core/drm_bo_move.c
linux-core/drm_drv.c
linux-core/drm_object.c
linux-core/drm_objects.h
linux-core/drm_vm.c
linux-core/i810_dma.c
linux-core/i915_buffer.c
linux-core/i915_drv.c
linux-core/radeon_buffer.c
linux-core/radeon_drv.c
linux-core/radeon_ms_drv.c
shared-core/drm.h
shared-core/drm_pciids.txt
shared-core/i915_dma.c
shared-core/i915_drm.h
shared-core/i915_drv.h
shared-core/i915_irq.c
shared-core/i915_mem.c
shared-core/radeon_cp.c
shared-core/radeon_drv.h
shared-core/radeon_irq.c
shared-core/radeon_ms.h
shared-core/radeon_ms_bo.c
shared-core/radeon_ms_drm.c

diff --combined libdrm/xf86drm.c
@@@ -87,9 -87,6 +87,9 @@@
  
  #define DRM_MSG_VERBOSITY 3
  
 +#define DRM_NODE_CONTROL 0
 +#define DRM_NODE_RENDER 1
 +
  static drmServerInfoPtr drm_server_info;
  
  void drmSetServerInfo(drmServerInfoPtr info)
@@@ -267,7 -264,7 +267,7 @@@ static int drmMatchBusID(const char *id
   * special file node with the major and minor numbers specified by \p dev and
   * parent directory if necessary and was called by root.
   */
 -static int drmOpenDevice(long dev, int minor)
 +static int drmOpenDevice(long dev, int minor, int type)
  {
      stat_t          st;
      char            buf[64];
      uid_t           user    = DRM_DEV_UID;
      gid_t           group   = DRM_DEV_GID, serv_group;
      
 -    sprintf(buf, DRM_DEV_NAME, DRM_DIR_NAME, minor);
 +    sprintf(buf, type ? DRM_DEV_NAME : DRM_CONTROL_DEV_NAME, DRM_DIR_NAME, minor);
      drmMsg("drmOpenDevice: node name is %s\n", buf);
  
      if (drm_server_info) {
   * Calls drmOpenDevice() if \p create is set, otherwise assembles the device
   * name from \p minor and opens it.
   */
 -static int drmOpenMinor(int minor, int create)
 +static int drmOpenMinor(int minor, int create, int type)
  {
      int  fd;
      char buf[64];
      
      if (create)
 -      return drmOpenDevice(makedev(DRM_MAJOR, minor), minor);
 +      return drmOpenDevice(makedev(DRM_MAJOR, minor), minor, type);
      
 -    sprintf(buf, DRM_DEV_NAME, DRM_DIR_NAME, minor);
 +    sprintf(buf, type ? DRM_DEV_NAME : DRM_CONTROL_DEV_NAME, DRM_DIR_NAME, minor);
      if ((fd = open(buf, O_RDWR, 0)) >= 0)
        return fd;
      return -errno;
@@@ -382,7 -379,7 +382,7 @@@ int drmAvailable(void
      int           retval = 0;
      int           fd;
  
 -    if ((fd = drmOpenMinor(0, 1)) < 0) {
 +    if ((fd = drmOpenMinor(0, 1, DRM_NODE_RENDER)) < 0) {
  #ifdef __linux__
        /* Try proc for backward Linux compatibility */
        if (!access("/proc/dri/0", R_OK))
@@@ -423,7 -420,7 +423,7 @@@ static int drmOpenByBusid(const char *b
  
      drmMsg("drmOpenByBusid: Searching for BusID %s\n", busid);
      for (i = 0; i < DRM_MAX_MINOR; i++) {
 -      fd = drmOpenMinor(i, 1);
 +      fd = drmOpenMinor(i, 1, DRM_NODE_RENDER);
        drmMsg("drmOpenByBusid: drmOpenMinor returns %d\n", fd);
        if (fd >= 0) {
            sv.drm_di_major = 1;
@@@ -485,7 -482,7 +485,7 @@@ static int drmOpenByName(const char *na
       * already in use.  If it's in use it will have a busid assigned already.
       */
      for (i = 0; i < DRM_MAX_MINOR; i++) {
 -      if ((fd = drmOpenMinor(i, 1)) >= 0) {
 +      if ((fd = drmOpenMinor(i, 1, DRM_NODE_RENDER)) >= 0) {
            if ((version = drmGetVersion(fd))) {
                if (!strcmp(version->name, name)) {
                    drmFreeVersion(version);
                        if (*pt) { /* Found busid */
                            return drmOpenByBusid(++pt);
                        } else { /* No busid */
 -                          return drmOpenDevice(strtol(devstring, NULL, 0),i);
 +                          return drmOpenDevice(strtol(devstring, NULL, 0),i, DRM_NODE_RENDER);
                        }
                    }
                }
@@@ -579,10 -576,6 +579,10 @@@ int drmOpen(const char *name, const cha
      return -1;
  }
  
 +int drmOpenControl(int minor)
 +{
 +    return drmOpenMinor(minor, 0, DRM_NODE_CONTROL);
 +}
  
  /**
   * Free the version information returned by drmGetVersion().
@@@ -2585,7 -2578,7 +2585,7 @@@ static void drmBOCopyReply(const struc
      buf->size = rep->size;
      buf->offset = rep->offset;
      buf->mapHandle = rep->arg_handle;
-     buf->mask = rep->mask;
+     buf->proposedFlags = rep->proposed_flags;
      buf->start = rep->buffer_start;
      buf->fenceFlags = rep->fence_flags;
      buf->replyFlags = rep->rep_flags;
  
  int drmBOCreate(int fd, unsigned long size,
                unsigned pageAlignment, void *user_buffer,
-               uint64_t mask,
+               uint64_t flags,
                unsigned hint, drmBO *buf)
  {
      struct drm_bo_create_arg arg;
  
      memset(buf, 0, sizeof(*buf));
      memset(&arg, 0, sizeof(arg));
-     req->mask = mask;
+     req->flags = flags;
      req->hint = hint;
      req->size = size;
      req->page_alignment = pageAlignment;
diff --combined linux-core/ati_pcigart.c
  
  # define ATI_PCIGART_PAGE_SIZE                4096    /**< PCI GART page size */
  
 +static __inline__ void insert_page_into_table(struct drm_ati_pcigart_info *info, u32 page_base, u32 *pci_gart)
 +{
 +      switch(info->gart_reg_if) {
 +      case DRM_ATI_GART_IGP:
 +              *pci_gart = cpu_to_le32((page_base) | 0xc);
 +              break;
 +      case DRM_ATI_GART_PCIE:
 +              *pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
 +              break;
 +      default:
 +      case DRM_ATI_GART_PCI:
 +              *pci_gart = cpu_to_le32(page_base);
 +              break;
 +      }
 +}
 +
 +static __inline__ u32 get_page_base_from_table(struct drm_ati_pcigart_info *info, u32 *pci_gart)
 +{
 +      u32 retval;
 +      switch(info->gart_reg_if) {
 +      case DRM_ATI_GART_IGP:
 +              retval = *pci_gart;
 +              retval &= ~0xc;
 +              break;
 +      case DRM_ATI_GART_PCIE:
 +              retval = *pci_gart;
 +              retval &= ~0xc;
 +              retval <<= 8;
 +              break;
 +      default:
 +      case DRM_ATI_GART_PCI:
 +              retval = *pci_gart;
 +              break;
 +      }
 +      return retval;
 +}
 +
 +
 +
  static void *drm_ati_alloc_pcigart_table(int order)
  {
        unsigned long address;
        struct page *page;
        int i;
  
-       DRM_DEBUG("%s: alloc %d order\n", __FUNCTION__, order);
+       DRM_DEBUG("%d order\n", order);
  
        address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
                                   order);
@@@ -97,7 -58,7 +97,7 @@@
                SetPageReserved(page);
        }
  
-       DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
+       DRM_DEBUG("returning 0x%08lx\n", address);
        return (void *)address;
  }
  
@@@ -106,7 -67,7 +106,7 @@@ static void drm_ati_free_pcigart_table(
        struct page *page;
        int i;
        int num_pages = 1 << order;
-       DRM_DEBUG("%s\n", __FUNCTION__);
+       DRM_DEBUG("\n");
  
        page = virt_to_page((unsigned long)address);
  
@@@ -246,7 -207,18 +246,7 @@@ int drm_ati_pcigart_init(struct drm_dev
                page_base = (u32) entry->busaddr[i];
  
                for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
 -                      switch(gart_info->gart_reg_if) {
 -                      case DRM_ATI_GART_IGP:
 -                              *pci_gart = cpu_to_le32((page_base) | 0xc);
 -                              break;
 -                      case DRM_ATI_GART_PCIE:
 -                              *pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
 -                              break;
 -                      default:
 -                      case DRM_ATI_GART_PCI:
 -                              *pci_gart = cpu_to_le32(page_base);
 -                              break;
 -                      }
 +                      insert_page_into_table(gart_info, page_base, pci_gart);
                        pci_gart++;
                        page_base += ATI_PCIGART_PAGE_SIZE;
                }
        return ret;
  }
  EXPORT_SYMBOL(drm_ati_pcigart_init);
 +
 +static int ati_pcigart_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
 +{
 +      return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
 +}
 +
 +static int ati_pcigart_populate(struct drm_ttm_backend *backend,
 +                              unsigned long num_pages,
 +                              struct page **pages)
 +{
 +      ati_pcigart_ttm_backend_t *atipci_be =
 +              container_of(backend, ati_pcigart_ttm_backend_t, backend);
 +
 +      DRM_ERROR("%ld\n", num_pages);
 +      atipci_be->pages = pages;
 +      atipci_be->num_pages = num_pages;
 +      atipci_be->populated = 1;
 +      return 0;
 +}
 +
 +static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
 +                              struct drm_bo_mem_reg *bo_mem)
 +{
 +      ati_pcigart_ttm_backend_t *atipci_be =
 +              container_of(backend, ati_pcigart_ttm_backend_t, backend);
 +        off_t j;
 +      int i;
 +      struct drm_ati_pcigart_info *info = atipci_be->gart_info;
 +      u32 *pci_gart;
 +      u32 page_base;
 +      unsigned long offset = bo_mem->mm_node->start;
 +      pci_gart = info->addr;
 +
 +      DRM_ERROR("Offset is %08lX\n", bo_mem->mm_node->start);
 +        j = offset;
 +        while (j < (offset + atipci_be->num_pages)) {
 +              if (get_page_base_from_table(info, pci_gart+j))
 +                      return -EBUSY;
 +                j++;
 +        }
 +
 +        for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
 +              struct page *cur_page = atipci_be->pages[i];
 +                /* write value */
 +              page_base = page_to_phys(cur_page);
 +              insert_page_into_table(info, page_base, pci_gart + j);
 +        }
 +
 +#if defined(__i386__) || defined(__x86_64__)
 +      wbinvd();
 +#else
 +      mb();
 +#endif
 +
 +      atipci_be->gart_flush_fn(atipci_be->dev);
 +
 +      atipci_be->bound = 1;
 +      atipci_be->offset = offset;
 +        /* need to traverse table and add entries */
 +      DRM_DEBUG("\n");
 +      return 0;
 +}
 +
 +static int ati_pcigart_unbind_ttm(struct drm_ttm_backend *backend)
 +{
 +      ati_pcigart_ttm_backend_t *atipci_be =
 +              container_of(backend, ati_pcigart_ttm_backend_t, backend);
 +      struct drm_ati_pcigart_info *info = atipci_be->gart_info;       
 +      unsigned long offset = atipci_be->offset;
 +      int i;
 +      off_t j;
 +      u32 *pci_gart = info->addr;
 +
 +      DRM_DEBUG("\n");
 +
 +      if (atipci_be->bound != 1)
 +              return -EINVAL;
 +
 +      for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
 +              *(pci_gart + j) = 0;
 +      }
 +      atipci_be->gart_flush_fn(atipci_be->dev);
 +      atipci_be->bound = 0;
 +      atipci_be->offset = 0;
 +      return 0;
 +}
 +
 +static void ati_pcigart_clear_ttm(struct drm_ttm_backend *backend)
 +{
 +      ati_pcigart_ttm_backend_t *atipci_be =
 +              container_of(backend, ati_pcigart_ttm_backend_t, backend);
 +
 +      DRM_DEBUG("\n");        
 +      if (atipci_be->pages) {
 +              backend->func->unbind(backend);
 +              atipci_be->pages = NULL;
 +
 +      }
 +      atipci_be->num_pages = 0;
 +}
 +
 +static void ati_pcigart_destroy_ttm(struct drm_ttm_backend *backend)
 +{
 +      ati_pcigart_ttm_backend_t *atipci_be;
 +      if (backend) {
 +              DRM_DEBUG("\n");
 +              atipci_be = container_of(backend, ati_pcigart_ttm_backend_t, backend);
 +              if (atipci_be) {
 +                      if (atipci_be->pages) {
 +                              backend->func->clear(backend);
 +                      }
 +                      drm_ctl_free(atipci_be, sizeof(*atipci_be), DRM_MEM_TTM);
 +              }
 +      }
 +}
 +
 +static struct drm_ttm_backend_func ati_pcigart_ttm_backend = 
 +{
 +      .needs_ub_cache_adjust = ati_pcigart_needs_unbind_cache_adjust,
 +      .populate = ati_pcigart_populate,
 +      .clear = ati_pcigart_clear_ttm,
 +      .bind = ati_pcigart_bind_ttm,
 +      .unbind = ati_pcigart_unbind_ttm,
 +      .destroy =  ati_pcigart_destroy_ttm,
 +};
 +
 +struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev))
 +{
 +      ati_pcigart_ttm_backend_t *atipci_be;
 +
 +      atipci_be = drm_ctl_calloc(1, sizeof (*atipci_be), DRM_MEM_TTM);
 +      if (!atipci_be)
 +              return NULL;
 +      
 +      atipci_be->populated = 0;
 +      atipci_be->backend.func = &ati_pcigart_ttm_backend;
 +      atipci_be->gart_info = info;
 +      atipci_be->gart_flush_fn = gart_flush_fn;
 +      atipci_be->dev = dev;
 +
 +      return &atipci_be->backend;
 +}
 +EXPORT_SYMBOL(ati_pcigart_init_ttm);
diff --combined linux-core/drmP.h
@@@ -104,10 -104,8 +104,8 @@@ struct drm_file
  #define DRIVER_HAVE_DMA    0x20
  #define DRIVER_HAVE_IRQ    0x40
  #define DRIVER_IRQ_SHARED  0x80
- #define DRIVER_IRQ_VBL     0x100
- #define DRIVER_DMA_QUEUE   0x200
- #define DRIVER_FB_DMA      0x400
- #define DRIVER_IRQ_VBL2    0x800
+ #define DRIVER_DMA_QUEUE   0x100
+ #define DRIVER_FB_DMA      0x200
  
  
  /*@}*/
  
  #include "drm_compat.h"
  
 +#include "drm_crtc.h"
 +
  /***********************************************************************/
  /** \name Macros to make printk easier */
  /*@{*/
@@@ -302,7 -298,6 +300,7 @@@ typedef int drm_ioctl_compat_t(struct f
  #define DRM_AUTH        0x1
  #define DRM_MASTER      0x2
  #define DRM_ROOT_ONLY   0x4
 +#define DRM_CONTROL_ALLOW 0x8 // allow ioctl to operate on control node 
  
  struct drm_ioctl_desc {
        unsigned int cmd;
@@@ -413,12 -408,13 +411,12 @@@ enum drm_ref_type 
  struct drm_file {
        int authenticated;
        int master;
 -      int minor;
        pid_t pid;
        uid_t uid;
        drm_magic_t magic;
        unsigned long ioctl_count;
        struct list_head lhead;
 -      struct drm_head *head;
 +      struct drm_minor *minor;
        int remove_auth_on_close;
        unsigned long lock_count;
  
        struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
        struct file *filp;
        void *driver_priv;
 +
 +      struct list_head fbs;
  };
  
  /** Wait queue */
@@@ -632,9 -626,51 +630,51 @@@ struct drm_driver 
        int (*context_dtor) (struct drm_device *dev, int context);
        int (*kernel_context_switch) (struct drm_device *dev, int old,
                                      int new);
-       void (*kernel_context_switch_unlock) (struct drm_device *dev);
-       int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
-       int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
+       void (*kernel_context_switch_unlock) (struct drm_device * dev);
+       /**
+        * get_vblank_counter - get raw hardware vblank counter
+        * @dev: DRM device
+        * @crtc: counter to fetch
+        *
+        * Driver callback for fetching a raw hardware vblank counter
+        * for @crtc.  If a device doesn't have a hardware counter, the
+        * driver can simply return the value of drm_vblank_count and
+        * make the enable_vblank() and disable_vblank() hooks into no-ops,
+        * leaving interrupts enabled at all times.
+        *
+        * Wraparound handling and loss of events due to modesetting is dealt
+        * with in the DRM core code.
+        *
+        * RETURNS
+        * Raw vblank counter value.
+        */
+       u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
+       /**
+        * enable_vblank - enable vblank interrupt events
+        * @dev: DRM device
+        * @crtc: which irq to enable
+        *
+        * Enable vblank interrupts for @crtc.  If the device doesn't have
+        * a hardware vblank counter, this routine should be a no-op, since
+        * interrupts will have to stay on to keep the count accurate.
+        *
+        * RETURNS
+        * Zero on success, appropriate errno if the given @crtc's vblank
+        * interrupt cannot be enabled.
+        */
+       int (*enable_vblank) (struct drm_device *dev, int crtc);
+       /**
+        * disable_vblank - disable vblank interrupt events
+        * @dev: DRM device
+        * @crtc: which irq to enable
+        *
+        * Disable vblank interrupts for @crtc.  If the device doesn't have
+        * a hardware vblank counter, this routine should be a no-op, since
+        * interrupts will have to stay on to keep the count accurate.
+        */
+       void (*disable_vblank) (struct drm_device *dev, int crtc);
        int (*dri_library_name) (struct drm_device *dev, char * buf);
  
        /**
  /* these have to be filled in */
         irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
        void (*irq_preinstall) (struct drm_device *dev);
-       void (*irq_postinstall) (struct drm_device *dev);
+       int (*irq_postinstall) (struct drm_device *dev);
        void (*irq_uninstall) (struct drm_device *dev);
        void (*reclaim_buffers) (struct drm_device *dev,
                                 struct drm_file *file_priv);
        void (*set_version) (struct drm_device *dev,
                             struct drm_set_version *sv);
  
 +      /* FB routines, if present */
 +      int (*fb_probe)(struct drm_device *dev, struct drm_crtc *crtc);
 +      int (*fb_remove)(struct drm_device *dev, struct drm_crtc *crtc);
 +      int (*fb_resize)(struct drm_device *dev, struct drm_crtc *crtc);
 +
        struct drm_fence_driver *fence_driver;
        struct drm_bo_driver *bo_driver;
  
        struct pci_driver pci_driver;
  };
  
 +#define DRM_MINOR_UNASSIGNED 0
 +#define DRM_MINOR_CONTROL 1
 +#define DRM_MINOR_RENDER 2
  /**
 - * DRM head structure. This structure represent a video head on a card
 - * that may contain multiple heads. Embed one per head of these in the
 - * private drm_device structure.
 + * DRM minor structure. This structure represents a drm minor number.
   */
 -struct drm_head {
 +struct drm_minor {
        int minor;                      /**< Minor device number */
 +      int type;                       /**< Control or render */
 +      dev_t device;                   /**< Device number for mknod */
 +      struct device kdev;             /**< Linux device */
        struct drm_device *dev;
 +      /* for render nodes */
        struct proc_dir_entry *dev_root;  /**< proc directory entry */
 -      dev_t device;                   /**< Device number for mknod */
        struct class_device *dev_class;
  };
  
   * may contain multiple heads.
   */
  struct drm_device {
 -      struct device dev;              /**< Linux device */
        char *unique;                   /**< Unique identifier: e.g., busid */
        int unique_len;                 /**< Length of unique field */
        char *devname;                  /**< For /proc/interrupts */
        /** \name VBLANK IRQ support */
        /*@{ */
  
-       wait_queue_head_t vbl_queue;    /**< VBLANK wait queue */
-       atomic_t vbl_received;
-       atomic_t vbl_received2;         /**< number of secondary VBLANK interrupts */
+       wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
+       atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
        spinlock_t vbl_lock;
-       struct list_head vbl_sigs;              /**< signal list to send on VBLANK */
-       struct list_head vbl_sigs2;     /**< signals to send on secondary VBLANK */
-       unsigned int vbl_pending;
+       struct list_head *vbl_sigs;             /**< signal list to send on VBLANK */
+       atomic_t vbl_signal_pending;    /* number of signals pending on all crtcs*/
+       atomic_t *vblank_refcount;      /* number of users of vblank interrupts per crtc */
+       u32 *last_vblank;               /* protected by dev->vbl_lock, used */
+                                       /* for wraparound handling */
+       u32 *vblank_offset;             /* used to track how many vblanks */
+       u32 *vblank_premodeset;         /*  were lost during modeset */
+       struct timer_list vblank_disable_timer;
+       unsigned long max_vblank_count; /**< size of vblank counter register */
        spinlock_t tasklet_lock;        /**< For drm_locked_tasklet */
        void (*locked_tasklet_func)(struct drm_device *dev);
  
  #ifdef __alpha__
        struct pci_controller *hose;
  #endif
+       int num_crtcs;                  /**< Number of CRTCs on this device */
        struct drm_sg_mem *sg;          /**< Scatter gather memory */
        void *dev_private;              /**< device private data */
        struct drm_sigdata sigdata;             /**< For block_all_signals */
        struct drm_driver *driver;
        drm_local_map_t *agp_buffer_map;
        unsigned int agp_buffer_token;
 -      struct drm_head primary;                /**< primary screen head */
 +
 +      /* minor number for control node */
 +      struct drm_minor control;
 +      struct drm_minor primary;               /**< primary screen head */
  
        struct drm_fence_manager fm;
        struct drm_buffer_manager bm;
        spinlock_t drw_lock;
        struct idr drw_idr;
        /*@} */
 +
 +      /* DRM mode setting */
 +      struct drm_mode_config mode_config;
  };
  
  #if __OS_HAS_AGP
@@@ -860,17 -889,6 +907,17 @@@ struct drm_agp_ttm_backend 
  };
  #endif
  
 +typedef struct ati_pcigart_ttm_backend {
 +      struct drm_ttm_backend backend;
 +      int populated;
 +      void (*gart_flush_fn)(struct drm_device *dev);
 +      struct drm_ati_pcigart_info *gart_info;
 +      unsigned long offset;
 +      struct page **pages;
 +      int num_pages;
 +      int bound;
 +      struct drm_device *dev;
 +} ati_pcigart_ttm_backend_t;
  
  static __inline__ int drm_core_check_feature(struct drm_device *dev,
                                             int feature)
@@@ -1113,11 -1131,19 +1160,19 @@@ extern void drm_driver_irq_preinstall(s
  extern void drm_driver_irq_postinstall(struct drm_device *dev);
  extern void drm_driver_irq_uninstall(struct drm_device *dev);
  
- extern int drm_wait_vblank(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv);
- extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
- extern void drm_vbl_send_signals(struct drm_device *dev);
+ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+ extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp);
+ extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq);
  extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
+ extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+ extern void drm_update_vblank_count(struct drm_device *dev, int crtc);
+ extern void drm_handle_vblank(struct drm_device *dev, int crtc);
+ extern int drm_vblank_get(struct drm_device *dev, int crtc);
+ extern void drm_vblank_put(struct drm_device *dev, int crtc);
+                               /* Modesetting support */
+ extern int drm_modeset_ctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
  
                                /* AGP/GART support (drm_agpsupport.h) */
  extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
@@@ -1159,10 -1185,10 +1214,10 @@@ extern void drm_agp_chipset_flush(struc
  extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                     struct drm_driver *driver);
  extern int drm_put_dev(struct drm_device *dev);
 -extern int drm_put_head(struct drm_head * head);
 +extern int drm_put_minor(struct drm_minor *minor);
  extern unsigned int drm_debug; /* 1 to enable debug output */
 -extern unsigned int drm_cards_limit;
 -extern struct drm_head **drm_heads;
 +extern unsigned int drm_minors_limit;
 +extern struct drm_minor **drm_minors;
  extern struct class *drm_class;
  extern struct proc_dir_entry *drm_proc_root;
  
@@@ -1188,7 -1214,6 +1243,7 @@@ extern int drm_sg_free(struct drm_devic
                               /* ATI PCIGART support (ati_pcigart.h) */
  extern int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
  extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
 +extern struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev));
  
  extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
                           size_t align, dma_addr_t maxaddr);
@@@ -1199,8 -1224,8 +1254,8 @@@ extern void drm_pci_free(struct drm_dev
  struct drm_sysfs_class;
  extern struct class *drm_sysfs_create(struct module *owner, char *name);
  extern void drm_sysfs_destroy(void);
 -extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head);
 -extern void drm_sysfs_device_remove(struct drm_device *dev);
 +extern int drm_sysfs_device_add(struct drm_minor *minor);
 +extern void drm_sysfs_device_remove(struct drm_minor *minor);
  
  /*
   * Basic memory manager support (drm_mm.c)
@@@ -1217,7 -1242,6 +1272,7 @@@ extern int drm_mm_clean(struct drm_mm *
  extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
  extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
  extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
 +extern void drm_mm_print(struct drm_mm *mm, const char *name);
  
  static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
  {
@@@ -130,7 -130,7 +130,7 @@@ EXPORT_SYMBOL(drm_agp_acquire)
  int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
  {
 -      return drm_agp_acquire((struct drm_device *) file_priv->head->dev);
 +      return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
  }
  
  /**
@@@ -505,12 -505,14 +505,14 @@@ static int drm_agp_needs_unbind_cache_a
  
  
  static int drm_agp_populate(struct drm_ttm_backend *backend,
-                           unsigned long num_pages, struct page **pages)
+                           unsigned long num_pages, struct page **pages,
+                           struct page *dummy_read_page)
  {
        struct drm_agp_ttm_backend *agp_be =
                container_of(backend, struct drm_agp_ttm_backend, backend);
        struct page **cur_page, **last_page = pages + num_pages;
        DRM_AGP_MEM *mem;
+       int dummy_page_count = 0;
  
        if (drm_alloc_memctl(num_pages * sizeof(void *)))
                return -1;
  
        DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
        mem->page_count = 0;
-       for (cur_page = pages; cur_page < last_page; ++cur_page)
-               mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
+       for (cur_page = pages; cur_page < last_page; ++cur_page) {
+               struct page *page = *cur_page;
+               if (!page) {
+                       page = dummy_read_page;
+                       ++dummy_page_count;
+               }
+               mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page));
+       }
+       if (dummy_page_count)
+               DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count);
        agp_be->mem = mem;
        return 0;
  }
diff --combined linux-core/drm_bo.c
@@@ -80,7 -80,7 +80,7 @@@ void drm_bo_add_to_lru(struct drm_buffe
  
        DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
  
-       if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
+       if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
            || bo->mem.mem_type != bo->pinned_mem_type) {
                man = &bo->dev->bm.man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
@@@ -137,27 -137,32 +137,32 @@@ static int drm_bo_add_ttm(struct drm_bu
  {
        struct drm_device *dev = bo->dev;
        int ret = 0;
+       uint32_t page_flags = 0;
  
        DRM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;
  
+       if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
+               page_flags |= DRM_TTM_PAGE_WRITE;
        switch (bo->type) {
-       case drm_bo_type_dc:
+       case drm_bo_type_device:
        case drm_bo_type_kernel:
-               bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
+               bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, 
+                                        page_flags, dev->bm.dummy_read_page);
                if (!bo->ttm)
                        ret = -ENOMEM;
                break;
        case drm_bo_type_user:
-               bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
+               bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
+                                        page_flags | DRM_TTM_PAGE_USER,
+                                        dev->bm.dummy_read_page);
                if (!bo->ttm)
                        ret = -ENOMEM;
  
                ret = drm_ttm_set_user(bo->ttm, current,
-                                      bo->mem.mask & DRM_BO_FLAG_WRITE,
                                       bo->buffer_start,
-                                      bo->num_pages,
-                                      dev->bm.dummy_read_page);
+                                      bo->num_pages);
                if (ret)
                        return ret;
  
@@@ -199,7 -204,7 +204,7 @@@ static int drm_bo_handle_move_mem(struc
                        goto out_err;
  
                if (mem->mem_type != DRM_BO_MEM_LOCAL) {
-                       ret = drm_bind_ttm(bo->ttm, mem);
+                       ret = drm_ttm_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }
  
                struct drm_bo_mem_reg *old_mem = &bo->mem;
                uint64_t save_flags = old_mem->flags;
-               uint64_t save_mask = old_mem->mask;
+               uint64_t save_proposed_flags = old_mem->proposed_flags;
  
                *old_mem = *mem;
                mem->mm_node = NULL;
-               old_mem->mask = save_mask;
+               old_mem->proposed_flags = save_proposed_flags;
                DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
  
        } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
@@@ -262,7 -267,7 +267,7 @@@ out_err
        new_man = &bm->man[bo->mem.mem_type];
        if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
                drm_ttm_unbind(bo->ttm);
-               drm_destroy_ttm(bo->ttm);
+               drm_ttm_destroy(bo->ttm);
                bo->ttm = NULL;
        }
  
@@@ -419,7 -424,7 +424,7 @@@ static void drm_bo_destroy_locked(struc
  
                if (bo->ttm) {
                        drm_ttm_unbind(bo->ttm);
-                       drm_destroy_ttm(bo->ttm);
+                       drm_ttm_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
  
@@@ -569,6 -574,7 +574,6 @@@ void drm_putback_buffer_objects(struct 
  }
  EXPORT_SYMBOL(drm_putback_buffer_objects);
  
 -
  /*
   * Note. The caller has to register (if applicable)
   * and deregister fence object usage.
@@@ -702,7 -708,7 +707,7 @@@ static int drm_bo_evict(struct drm_buff
        evict_mem.mm_node = NULL;
  
        evict_mem = bo->mem;
-       evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
+       evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
        ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
  
        if (ret) {
@@@ -866,7 -872,7 +871,7 @@@ int drm_bo_mem_space(struct drm_buffer_
  
                type_ok = drm_bo_mt_compatible(man,
                                               bo->type == drm_bo_type_user,
-                                              mem_type, mem->mask,
+                                              mem_type, mem->proposed_flags,
                                               &cur_flags);
  
                if (!type_ok)
                if (!drm_bo_mt_compatible(man,
                                          bo->type == drm_bo_type_user,
                                          mem_type,
-                                         mem->mask,
+                                         mem->proposed_flags,
                                          &cur_flags))
                        continue;
  
  }
  EXPORT_SYMBOL(drm_bo_mem_space);
  
- static int drm_bo_new_mask(struct drm_buffer_object *bo,
-                          uint64_t new_flags, uint64_t used_mask)
+ /*
+  * drm_bo_modify_proposed_flags:
+  *
+  * @bo: the buffer object getting new flags
+  *
+  * @new_flags: the new set of proposed flag bits
+  *
+  * @new_mask: the mask of bits changed in new_flags
+  *
+  * Modify the proposed_flag bits in @bo
+  */
+ static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
+                                        uint64_t new_flags, uint64_t new_mask)
  {
-       uint32_t new_props;
+       uint32_t new_access;
  
+       /* Copy unchanging bits from existing proposed_flags */
+       DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
+        
        if (bo->type == drm_bo_type_user &&
            ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
             (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
                return -EINVAL;
        }
  
-       if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+       if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
                DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
                return -EPERM;
        }
                return -EPERM;
        }
  
-       new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
-                                DRM_BO_FLAG_READ);
+       new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
+                                 DRM_BO_FLAG_READ);
  
-       if (!new_props) {
+       if (new_access == 0) {
                DRM_ERROR("Invalid buffer object rwx properties\n");
                return -EINVAL;
        }
  
-       bo->mem.mask = new_flags;
+       bo->mem.proposed_flags = new_flags;
        return 0;
  }
  
@@@ -1103,8 -1123,8 +1122,8 @@@ static int drm_bo_wait_unfenced(struct 
  
        ret = 0;
        mutex_unlock(&bo->mutex);
-       DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
-                   !drm_bo_check_unfenced(bo));
+       DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ,
+                    !drm_bo_check_unfenced(bo));
        mutex_lock(&bo->mutex);
        if (ret == -EINTR)
                return -EAGAIN;
@@@ -1135,12 -1155,17 +1154,17 @@@ static void drm_bo_fill_rep_arg(struct 
        rep->size = bo->num_pages * PAGE_SIZE;
        rep->offset = bo->offset;
  
-       if (bo->type == drm_bo_type_dc)
+       /*
+        * drm_bo_type_device buffers have user-visible
+        * handles which can be used to share across
+        * processes. Hand that back to the application
+        */
+       if (bo->type == drm_bo_type_device)
                rep->arg_handle = bo->map_list.user_token;
        else
                rep->arg_handle = 0;
  
-       rep->mask = bo->mem.mask;
+       rep->proposed_flags = bo->mem.proposed_flags;
        rep->buffer_start = bo->buffer_start;
        rep->fence_flags = bo->fence_type;
        rep->rep_flags = 0;
@@@ -1164,7 -1189,7 +1188,7 @@@ static int drm_buffer_object_map(struc
                                 struct drm_bo_info_rep *rep)
  {
        struct drm_buffer_object *bo;
 -      struct drm_device *dev = file_priv->head->dev;
 +      struct drm_device *dev = file_priv->minor->dev;
        int ret = 0;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
  
@@@ -1236,7 -1261,7 +1260,7 @@@ out
  
  static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
  {
 -      struct drm_device *dev = file_priv->head->dev;
 +      struct drm_device *dev = file_priv->minor->dev;
        struct drm_buffer_object *bo;
        struct drm_ref_object *ro;
        int ret = 0;
@@@ -1286,7 -1311,7 +1310,7 @@@ static void drm_buffer_user_object_unma
  
  /*
   * bo->mutex locked.
-  * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
+  * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
   */
  
  int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
  
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
-       mem.mask = new_mem_flags;
+       mem.proposed_flags = new_mem_flags;
        mem.page_alignment = bo->mem.page_alignment;
  
        mutex_lock(&bm->evict_mutex);
@@@ -1355,24 -1380,41 +1379,41 @@@ out_unlock
  
  static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
  {
-       uint32_t flag_diff = (mem->mask ^ mem->flags);
+       uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
  
-       if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
+       if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
                return 0;
        if ((flag_diff & DRM_BO_FLAG_CACHED) &&
-           (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
-            (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
+           (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
+            (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
                return 0;
  
        if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
-           ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
-            (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
+           ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
+            (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
                return 0;
        return 1;
  }
  
- /*
-  * bo locked.
+ /**
+  * drm_buffer_object_validate:
+  *
+  * @bo: the buffer object to modify
+  *
+  * @fence_class: the new fence class covering this buffer
+  *
+  * @move_unfenced: a boolean indicating whether switching the
+  * memory space of this buffer should cause the buffer to
+  * be placed on the unfenced list.
+  *
+  * @no_wait: whether this function should return -EBUSY instead
+  * of waiting.
+  *
+  * Change buffer access parameters. This can involve moving
+  * the buffer to the correct memory type, pinning the buffer
+  * or changing the class/type of fence covering this buffer
+  *
+  * Must be called with bo locked.
   */
  
  static int drm_buffer_object_validate(struct drm_buffer_object *bo,
        uint32_t ftype;
        int ret;
  
-       DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
-                 (unsigned long long) bo->mem.mask,
+       DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
+                 (unsigned long long) bo->mem.proposed_flags,
                  (unsigned long long) bo->mem.flags);
  
        ret = driver->fence_type(bo, &fence_class, &ftype);
         */
  
        if (!drm_bo_mem_compat(&bo->mem)) {
-               ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
+               ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
                                         move_unfenced);
                if (ret) {
                        if (ret != -EAGAIN)
         * Pinned buffers.
         */
  
-       if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+       if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
                bo->pinned_mem_type = bo->mem.mem_type;
                mutex_lock(&dev->struct_mutex);
                list_del_init(&bo->pinned_lru);
                if (ret)
                        return ret;
        }
-       DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
+       /*
+        * Validation has succeeded, move the access and other
+        * non-mapping-related flag bits from the proposed flags to
+        * the active flags
+        */
+       DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
  
        /*
         * Finally, adjust lru to be sure.
        return 0;
  }
  
+ /**
+  * drm_bo_do_validate:
+  *
+  * @bo:       the buffer object
+  *
+  * @flags: access rights, mapping parameters and cacheability. See
+  * the DRM_BO_FLAG_* values in drm.h
+  *
+  * @mask: Which flag values to change; this allows callers to modify
+  * things without knowing the current state of other flags.
+  *
+  * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
+  * values in drm.h.
+  *
+  * @fence_class: a driver-specific way of doing fences. Presumably,
+  * this would be used if the driver had more than one submission and
+  * fencing mechanism. At this point, there isn't any use of this
+  * from the user mode code.
+  *
+  * @rep: To be stuffed with the reply from validation
+  * 
+  * 'validate' a buffer object. This changes where the buffer is
+  * located, along with changing access modes.
+  */
  int drm_bo_do_validate(struct drm_buffer_object *bo,
                       uint64_t flags, uint64_t mask, uint32_t hint,
                       uint32_t fence_class,
-                      int no_wait,
                       struct drm_bo_info_rep *rep)
  {
        int ret;
+       int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
  
        mutex_lock(&bo->mutex);
        ret = drm_bo_wait_unfenced(bo, no_wait, 0);
        if (ret)
                goto out;
  
-       DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
-       ret = drm_bo_new_mask(bo, flags, mask);
+       ret = drm_bo_modify_proposed_flags (bo, flags, mask);
        if (ret)
                goto out;
  
  }
  EXPORT_SYMBOL(drm_bo_do_validate);
  
+ /**
+  * drm_bo_handle_validate
+  *
+  * @file_priv: the drm file private, used to get a handle to the user context
+  *
+  * @handle: the buffer object handle
+  *
+  * @flags: access rights, mapping parameters and cacheability. See
+  * the DRM_BO_FLAG_* values in drm.h
+  *
+  * @mask: Which flag values to change; this allows callers to modify
+  * things without knowing the current state of other flags.
+  *
+  * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
+  * values in drm.h.
+  *
+  * @fence_class: a driver-specific way of doing fences. Presumably,
+  * this would be used if the driver had more than one submission and
+  * fencing mechanism. At this point, there isn't any use of this
+  * from the user mode code.
+  *
+  * @use_old_fence_class: don't change fence class, pull it from the buffer object
+  *
+  * @rep: To be stuffed with the reply from validation
+  * 
+  * @bp_rep: To be stuffed with the buffer object pointer
+  *
+  * Perform drm_bo_do_validate on a buffer referenced by a user-space handle.
+  * Some permissions checking is done on the parameters, otherwise this
+  * is a thin wrapper.
+  */
  
  int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
-                          uint32_t fence_class,
                           uint64_t flags, uint64_t mask,
                           uint32_t hint,
+                          uint32_t fence_class,
                           int use_old_fence_class,
                           struct drm_bo_info_rep *rep,
                           struct drm_buffer_object **bo_rep)
  {
 -      struct drm_device *dev = file_priv->head->dev;
 +      struct drm_device *dev = file_priv->minor->dev;
        struct drm_buffer_object *bo;
        int ret;
-       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
  
        mutex_lock(&dev->struct_mutex);
        bo = drm_lookup_buffer_object(file_priv, handle, 1);
                mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
  
  
-       ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
-                                no_wait, rep);
+       ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
  
        if (!ret && bo_rep)
                *bo_rep = bo;
@@@ -1581,7 -1681,7 +1680,7 @@@ EXPORT_SYMBOL(drm_bo_handle_validate)
  static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
                              struct drm_bo_info_rep *rep)
  {
 -      struct drm_device *dev = file_priv->head->dev;
 +      struct drm_device *dev = file_priv->minor->dev;
        struct drm_buffer_object *bo;
  
        mutex_lock(&dev->struct_mutex);
@@@ -1604,7 -1704,7 +1703,7 @@@ static int drm_bo_handle_wait(struct dr
                              uint32_t hint,
                              struct drm_bo_info_rep *rep)
  {
 -      struct drm_device *dev = file_priv->head->dev;
 +      struct drm_device *dev = file_priv->minor->dev;
        struct drm_buffer_object *bo;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
        int ret;
@@@ -1635,7 -1735,7 +1734,7 @@@ out
  int drm_buffer_object_create(struct drm_device *dev,
                             unsigned long size,
                             enum drm_bo_type type,
-                            uint64_t mask,
+                            uint64_t flags,
                             uint32_t hint,
                             uint32_t page_alignment,
                             unsigned long buffer_start,
        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
-               DRM_ERROR("Illegal buffer object size %d.\n", size);
 -              DRM_ERROR("Illegal buffer object size.\n");
++              DRM_ERROR("Illegal buffer object size %ld.\n", size);
                return -EINVAL;
        }
  
        bo->mem.page_alignment = page_alignment;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
-       bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
-               DRM_BO_FLAG_MAPPABLE;
-       bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
-               DRM_BO_FLAG_MAPPABLE;
+       bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
+                        DRM_BO_FLAG_MAPPABLE);
+       bo->mem.proposed_flags = 0;
        atomic_inc(&bm->count);
-       ret = drm_bo_new_mask(bo, mask, mask);
+       /*
+        * Use drm_bo_modify_proposed_flags to error-check the proposed flags
+        */
+       ret = drm_bo_modify_proposed_flags (bo, flags, flags);
        if (ret)
                goto out_err;
  
-       if (bo->type == drm_bo_type_dc) {
+       /*
+        * For drm_bo_type_device buffers, allocate
+        * address space from the device so that applications
+        * can mmap the buffer from there
+        */
+       if (bo->type == drm_bo_type_device) {
                mutex_lock(&dev->struct_mutex);
                ret = drm_bo_setup_vm_locked(bo);
                mutex_unlock(&dev->struct_mutex);
@@@ -1717,7 -1824,7 +1823,7 @@@ EXPORT_SYMBOL(drm_buffer_object_create)
  static int drm_bo_add_user_object(struct drm_file *file_priv,
                                  struct drm_buffer_object *bo, int shareable)
  {
 -      struct drm_device *dev = file_priv->head->dev;
 +      struct drm_device *dev = file_priv->minor->dev;
        int ret;
  
        mutex_lock(&dev->struct_mutex);
@@@ -1752,20 -1859,28 +1858,28 @@@ int drm_bo_create_ioctl(struct drm_devi
                return -EINVAL;
        }
  
-       bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
+       /*
+        * If the buffer creation request comes in with a starting address,
+        * that points at the desired user pages to map. Otherwise, create
+        * a drm_bo_type_device buffer, which uses pages allocated from the kernel
+        */
+       bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
  
+       /*
+        * User buffers cannot be shared
+        */
        if (bo_type == drm_bo_type_user)
-               req->mask &= ~DRM_BO_FLAG_SHAREABLE;
+               req->flags &= ~DRM_BO_FLAG_SHAREABLE;
  
 -      ret = drm_buffer_object_create(file_priv->head->dev,
 +      ret = drm_buffer_object_create(file_priv->minor->dev,
-                                      req->size, bo_type, req->mask,
+                                      req->size, bo_type, req->flags,
                                       req->hint, req->page_alignment,
                                       req->buffer_start, &entry);
        if (ret)
                goto out;
  
        ret = drm_bo_add_user_object(file_priv, entry,
-                                    req->mask & DRM_BO_FLAG_SHAREABLE);
+                                    req->flags & DRM_BO_FLAG_SHAREABLE);
        if (ret) {
                drm_bo_usage_deref_unlocked(&entry);
                goto out;
@@@ -1796,11 -1911,17 +1910,17 @@@ int drm_bo_setstatus_ioctl(struct drm_d
        if (ret)
                return ret;
  
-       ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
+       /*
+        * validate the buffer. note that 'fence_class' will be unused
+        * as we pass use_old_fence_class=1 here. Note also that
+        * the libdrm API doesn't pass fence_class to the kernel,
+        * so it's a good thing it isn't used here.
+        */
+       ret = drm_bo_handle_validate(file_priv, req->handle,
                                     req->flags,
                                     req->mask,
                                     req->hint | DRM_BO_HINT_DONT_FENCE,
-                                    1,
+                                    req->fence_class, 1,
                                     rep, NULL);
  
        (void) drm_bo_read_unlock(&dev->bm.bm_lock);
@@@ -1951,7 -2072,7 +2071,7 @@@ static int drm_bo_leave_list(struct drm
                DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
                          "cleanup. Removing flag and evicting.\n");
                bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
-               bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
+               bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
        }
  
        if (bo->mem.mem_type == mem_type)
@@@ -2221,7 -2342,6 +2341,7 @@@ out
        mutex_unlock(&dev->struct_mutex);
        return ret;
  }
 +EXPORT_SYMBOL(drm_bo_driver_finish);
  
  /*
   * This function is intended to be called on drm driver load.
@@@ -2502,6 -2622,14 +2622,14 @@@ void drm_bo_unmap_virtual(struct drm_bu
        unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
  }
  
+ /**
+  * drm_bo_takedown_vm_locked:
+  *
+  * @bo: the buffer object to remove any drm device mapping
+  *
+  * Remove any associated vm mapping on the drm device node that
+  * would have been created for a drm_bo_type_device buffer
+  */
  static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
  {
        struct drm_map_list *list;
        struct drm_device *dev = bo->dev;
  
        DRM_ASSERT_LOCKED(&dev->struct_mutex);
-       if (bo->type != drm_bo_type_dc)
+       if (bo->type != drm_bo_type_device)
                return;
  
        list = &bo->map_list;
        drm_bo_usage_deref_locked(&bo);
  }
  
+ /**
+  * drm_bo_setup_vm_locked:
+  *
+  * @bo: the buffer to allocate address space for
+  *
+  * Allocate address space in the drm device so that applications
+  * can mmap the buffer and access the contents. This only
+  * applies to drm_bo_type_device objects as others are not
+  * placed in the drm device address space.
+  */
  static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
  {
        struct drm_map_list *list = &bo->map_list;
diff --combined linux-core/drm_bo_move.c
@@@ -54,7 -54,7 +54,7 @@@ int drm_bo_move_ttm(struct drm_buffer_o
        struct drm_ttm *ttm = bo->ttm;
        struct drm_bo_mem_reg *old_mem = &bo->mem;
        uint64_t save_flags = old_mem->flags;
-       uint64_t save_mask = old_mem->mask;
+       uint64_t save_proposed_flags = old_mem->proposed_flags;
        int ret;
  
        if (old_mem->mem_type == DRM_BO_MEM_TT) {
                save_flags = old_mem->flags;
        }
        if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
-               ret = drm_bind_ttm(ttm, new_mem);
+               ret = drm_ttm_bind(ttm, new_mem);
                if (ret)
                        return ret;
        }
  
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
-       old_mem->mask = save_mask;
+       old_mem->proposed_flags = save_proposed_flags;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
  }
@@@ -147,7 -147,6 +147,7 @@@ void drm_mem_reg_iounmap(struct drm_dev
        if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
                iounmap(virtual);
  }
 +EXPORT_SYMBOL(drm_mem_reg_iounmap);
  
  static int drm_copy_io_page(void *dst, void *src, unsigned long page)
  {
@@@ -211,7 -210,7 +211,7 @@@ int drm_bo_move_memcpy(struct drm_buffe
        void *new_iomap;
        int ret;
        uint64_t save_flags = old_mem->flags;
-       uint64_t save_mask = old_mem->mask;
+       uint64_t save_proposed_flags = old_mem->proposed_flags;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
@@@ -256,12 -255,12 +256,12 @@@ out2
  
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
-       old_mem->mask = save_mask;
+       old_mem->proposed_flags = save_proposed_flags;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
  
        if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
                drm_ttm_unbind(ttm);
-               drm_destroy_ttm(ttm);
+               drm_ttm_destroy(ttm);
                bo->ttm = NULL;
        }
  
@@@ -331,7 -330,7 +331,7 @@@ int drm_bo_move_accel_cleanup(struct dr
        struct drm_bo_mem_reg *old_mem = &bo->mem;
        int ret;
        uint64_t save_flags = old_mem->flags;
-       uint64_t save_mask = old_mem->mask;
+       uint64_t save_proposed_flags = old_mem->proposed_flags;
        struct drm_buffer_object *old_obj;
  
        if (bo->fence)
  
                if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
                        drm_ttm_unbind(bo->ttm);
-                       drm_destroy_ttm(bo->ttm);
+                       drm_ttm_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
        } else {
  
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
-       old_mem->mask = save_mask;
+       old_mem->proposed_flags = save_proposed_flags;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
  }
diff --combined linux-core/drm_drv.c
@@@ -56,7 -56,7 +56,7 @@@ static int drm_version(struct drm_devic
  
  /** Ioctl table */
  static struct drm_ioctl_desc drm_ioctls[] = {
 -      DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
 +      DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 +      DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
 +      DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
 +      DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETOUTPUT, drm_mode_getoutput, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
 +      DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
 +      DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
 +      DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
 +      DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
 +
 +      DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_output_property_set_ioctl, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
 +      DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
 +      DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
 +      DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
 +      DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_ROOT_ONLY | DRM_CONTROL_ALLOW),
 +
++
        DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
                      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
@@@ -181,11 -171,12 +186,11 @@@ int drm_lastclose(struct drm_device * d
  
        DRM_DEBUG("\n");
  
 +/*    return 0; */
        /*
         * We can't do much about this function failing.
         */
  
 -      drm_bo_driver_finish(dev);
 -
        if (dev->driver->lastclose)
                dev->driver->lastclose(dev);
        DRM_DEBUG("driver lastclose completed\n");
                dev->unique_len = 0;
        }
  
 -      if (dev->irq_enabled)
 -              drm_irq_uninstall(dev);
 +/*    if (dev->irq_enabled)
 +              drm_irq_uninstall(dev); */
  
        /* Free drawable information memory */
        mutex_lock(&dev->struct_mutex);
                drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
        }
  
 +      /*
        list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
                if (!(r_list->map->flags & _DRM_DRIVER)) {
                        drm_rmmap_locked(dev, r_list->map);
                        r_list = NULL;
                }
 -      }
 +      }*/
  
        if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
                for (i = 0; i < dev->queue_count; i++) {
        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                drm_dma_takedown(dev);
  
 -      if (dev->lock.hw_lock) {
 -              dev->sigdata.lock = dev->lock.hw_lock = NULL;   /* SHM removed */
 -              dev->lock.file_priv = NULL;
 -              wake_up_interruptible(&dev->lock.lock_queue);
 -      }
        dev->dev_mapping = NULL;
        mutex_unlock(&dev->struct_mutex);
  
@@@ -406,14 -401,12 +411,14 @@@ static void drm_cleanup(struct drm_devi
                DRM_DEBUG("mtrr_del=%d\n", retval);
        }
  
 +      if (dev->driver->unload)
 +              dev->driver->unload(dev);
 +        
 +      drm_ht_remove(&dev->map_hash);
        if (drm_core_has_AGP(dev) && dev->agp) {
                drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
                dev->agp = NULL;
        }
 -      if (dev->driver->unload)
 -              dev->driver->unload(dev);
  
        if (!drm_fb_loaded)
                pci_disable_device(dev->pdev);
        drm_mm_takedown(&dev->offset_manager);
        drm_ht_remove(&dev->object_hash);
  
 -      drm_put_head(&dev->primary);
 +      drm_put_minor(&dev->primary);
 +      drm_put_minor(&dev->control);
        if (drm_put_dev(dev))
                DRM_ERROR("Cannot unload module\n");
  }
@@@ -433,19 -425,19 +438,19 @@@ void drm_exit(struct drm_driver *driver
  {
        int i;
        struct drm_device *dev = NULL;
 -      struct drm_head *head;
 +      struct drm_minor *minor;
  
        DRM_DEBUG("\n");
        if (drm_fb_loaded) {
 -              for (i = 0; i < drm_cards_limit; i++) {
 -                      head = drm_heads[i];
 -                      if (!head)
 +              for (i = 0; i < drm_minors_limit; i++) {
 +                      minor = drm_minors[i];
 +                      if (!minor)
                                continue;
 -                      if (!head->dev)
 +                      if (!minor->dev)
                                continue;
 -                      if (head->dev->driver != driver)
 +                      if (minor->dev->driver != driver)
                                continue;
 -                      dev = head->dev;
 +                      dev = minor->dev;
                        if (dev) {
                                /* release the pci driver */
                                if (dev->pdev)
@@@ -496,10 -488,10 +501,10 @@@ static int __init drm_core_init(void
        drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit);
  
        ret = -ENOMEM;
 -      drm_cards_limit =
 -          (drm_cards_limit < DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1);
 -      drm_heads = drm_calloc(drm_cards_limit, sizeof(*drm_heads), DRM_MEM_STUB);
 -      if (!drm_heads)
 +      drm_minors_limit =
 +          (drm_minors_limit < DRM_MAX_MINOR + 1 ? drm_minors_limit : DRM_MAX_MINOR + 1);
 +      drm_minors = drm_calloc(drm_minors_limit, sizeof(*drm_minors), DRM_MEM_STUB);
 +      if (!drm_minors)
                goto err_p1;
  
        if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
@@@ -529,7 -521,7 +534,7 @@@ err_p3
        drm_sysfs_destroy();
  err_p2:
        unregister_chrdev(DRM_MAJOR, "drm");
 -      drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
 +      drm_free(drm_minors, sizeof(*drm_minors) * drm_minors_limit, DRM_MEM_STUB);
  err_p1:
        return ret;
  }
@@@ -541,7 -533,7 +546,7 @@@ static void __exit drm_core_exit(void
  
        unregister_chrdev(DRM_MAJOR, "drm");
  
 -      drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
 +      drm_free(drm_minors, sizeof(*drm_minors) * drm_minors_limit, DRM_MEM_STUB);
  }
  
  module_init(drm_core_init);
@@@ -601,7 -593,7 +606,7 @@@ EXPORT_SYMBOL(drm_ioctl)
  long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  {
        struct drm_file *file_priv = filp->private_data;
 -      struct drm_device *dev = file_priv->head->dev;
 +      struct drm_device *dev = file_priv->minor->dev;
        struct drm_ioctl_desc *ioctl;
        drm_ioctl_t *func;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        ++file_priv->ioctl_count;
  
        DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
 -                current->pid, cmd, nr, (long)old_encode_dev(file_priv->head->device),
 +                current->pid, cmd, nr, (long)old_encode_dev(file_priv->minor->device),
                  file_priv->authenticated);
  
        if ((nr >= DRM_CORE_IOCTL_COUNT) &&
                retcode = -EINVAL;
                goto err_i1;
        }
 +        
  #if 0
        /*
         * This check is disabled, because driver private ioctl->cmd
                retcode = -EINVAL;
        } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
                   ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
 -                 ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
 +                 ((ioctl->flags & DRM_MASTER) && !file_priv->master) ||
 +                 ((!(ioctl->flags & DRM_CONTROL_ALLOW)) && (file_priv->minor->type == DRM_MINOR_CONTROL)) ) {
                retcode = -EACCES;
        } else {
                retcode = func(dev, kdata, file_priv);
diff --combined linux-core/drm_object.c
@@@ -33,7 -33,7 +33,7 @@@
  int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
                        int shareable)
  {
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        int ret;
  
        DRM_ASSERT_LOCKED(&dev->struct_mutex);
@@@ -44,7 -44,7 +44,7 @@@
        item->owner = priv;
  
        ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
-                                       (unsigned long)item, 32, 0, 0);
+                                       (unsigned long)item, 31, 0, 0);
        if (ret)
                return ret;
  
@@@ -58,7 -58,7 +58,7 @@@ EXPORT_SYMBOL(drm_add_user_object)
  
  struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
  {
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        struct drm_hash_item *hash;
        int ret;
        struct drm_user_object *item;
@@@ -85,7 -85,7 +85,7 @@@ EXPORT_SYMBOL(drm_lookup_user_object)
  
  static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
  {
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        int ret;
  
        if (atomic_dec_and_test(&item->refcount)) {
@@@ -121,7 -121,7 +121,7 @@@ int drm_add_ref_object(struct drm_file 
        struct drm_ref_object *item;
        struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
  
 -      DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
 +      DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
        if (!referenced_object->shareable && priv != referenced_object->owner) {
                DRM_ERROR("Not allowed to reference this object\n");
                return -EINVAL;
@@@ -178,7 -178,7 +178,7 @@@ struct drm_ref_object *drm_lookup_ref_o
        struct drm_hash_item *hash;
        int ret;
  
 -      DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
 +      DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
        ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
                               (unsigned long)referenced_object, &hash);
        if (ret)
@@@ -212,7 -212,7 +212,7 @@@ void drm_remove_ref_object(struct drm_f
        struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
        enum drm_ref_type unref_action;
  
 -      DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
 +      DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
        unref_action = item->unref_action;
        if (atomic_dec_and_test(&item->refcount)) {
                ret = drm_ht_remove_item(ht, &item->hash);
@@@ -239,7 -239,7 +239,7 @@@ EXPORT_SYMBOL(drm_remove_ref_object)
  int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
                        enum drm_object_type type, struct drm_user_object **object)
  {
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        struct drm_user_object *uo;
        struct drm_hash_item *hash;
        int ret;
@@@ -269,7 -269,7 +269,7 @@@ out_err
  int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
                          enum drm_object_type type)
  {
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        struct drm_user_object *uo;
        struct drm_ref_object *ro;
        int ret;
diff --combined linux-core/drm_objects.h
@@@ -157,6 -157,7 +157,6 @@@ struct drm_fence_object 
  };
  
  #define _DRM_FENCE_CLASSES 8
 -#define _DRM_FENCE_TYPE_EXE 0x00
  
  struct drm_fence_class_manager {
        struct list_head ring;
@@@ -262,7 -263,8 +262,8 @@@ struct drm_ttm_backend
  struct drm_ttm_backend_func {
        int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
        int (*populate) (struct drm_ttm_backend *backend,
-                        unsigned long num_pages, struct page **pages);
+                        unsigned long num_pages, struct page **pages,
+                        struct page *dummy_read_page);
        void (*clear) (struct drm_ttm_backend *backend);
        int (*bind) (struct drm_ttm_backend *backend,
                     struct drm_bo_mem_reg *bo_mem);
@@@ -296,8 -298,10 +297,10 @@@ struct drm_ttm 
  
  };
  
- extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
- extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
+ extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
+                                     uint32_t page_flags,
+                                     struct page *dummy_read_page);
+ extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
  extern void drm_ttm_unbind(struct drm_ttm *ttm);
  extern void drm_ttm_evict(struct drm_ttm *ttm);
  extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
@@@ -306,10 -310,8 +309,8 @@@ extern void drm_ttm_cache_flush(void)
  extern int drm_ttm_populate(struct drm_ttm *ttm);
  extern int drm_ttm_set_user(struct drm_ttm *ttm,
                            struct task_struct *tsk,
-                           int write,
                            unsigned long start,
-                           unsigned long num_pages,
-                           struct page *dummy_read_page);
+                           unsigned long num_pages);
  
  /*
   * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
   * Otherwise it is called when the last vma exits.
   */
  
- extern int drm_destroy_ttm(struct drm_ttm *ttm);
+ extern int drm_ttm_destroy(struct drm_ttm *ttm);
  
  #define DRM_FLAG_MASKED(_old, _new, _mask) {\
  (_old) ^= (((_old) ^ (_new)) & (_mask)); \
   * Page flags.
   */
  
+ /*
+  * This ttm should not be cached by the CPU
+  */
  #define DRM_TTM_PAGE_UNCACHED   (1 << 0)
+ /*
+  * This flag is not used at this time; I don't know what the
+  * intent was
+  */
  #define DRM_TTM_PAGE_USED       (1 << 1)
+ /*
+  * This flag is not used at this time; I don't know what the
+  * intent was
+  */
  #define DRM_TTM_PAGE_BOUND      (1 << 2)
+ /*
+  * This flag is not used at this time; I don't know what the
+  * intent was
+  */
  #define DRM_TTM_PAGE_PRESENT    (1 << 3)
+ /*
+  * The array of page pointers was allocated with vmalloc
+  * instead of drm_calloc.
+  */
  #define DRM_TTM_PAGE_VMALLOC    (1 << 4)
+ /*
+  * This ttm is mapped from user space
+  */
  #define DRM_TTM_PAGE_USER       (1 << 5)
- #define DRM_TTM_PAGE_USER_WRITE (1 << 6)
+ /*
+  * This ttm will be written to by the GPU
+  */
+ #define DRM_TTM_PAGE_WRITE    (1 << 6)
+ /*
+  * This ttm was mapped to the GPU, and so the contents may have
+  * been modified
+  */
  #define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
+ /*
+  * This flag is not used at this time; I don't know what the
+  * intent was.
+  */
  #define DRM_TTM_PAGE_USER_DMA   (1 << 8)
  
  /***************************************************
@@@ -350,16 -385,50 +384,50 @@@ struct drm_bo_mem_reg 
        unsigned long num_pages;
        uint32_t page_alignment;
        uint32_t mem_type;
+       /*
+        * Current buffer status flags, indicating
+        * where the buffer is located and which
+        * access modes are in effect
+        */
        uint64_t flags;
-       uint64_t mask;
+       /**
+        * These are the flags proposed for
+        * a validate operation. If the
+        * validate succeeds, they'll get moved
+        * into the flags field
+        */
+       uint64_t proposed_flags;
+       
        uint32_t desired_tile_stride;
        uint32_t hw_tile_stride;
  };
  
  enum drm_bo_type {
-       drm_bo_type_dc,
+       /*
+        * drm_bo_type_device are 'normal' drm allocations,
+        * pages are allocated from within the kernel automatically
+        * and the objects can be mmap'd from the drm device. Each
+        * drm_bo_type_device object has a unique name which can be
+        * used by other processes to share access to the underlying
+        * buffer.
+        */
+       drm_bo_type_device,
+       /*
+        * drm_bo_type_user are buffers of pages that already exist
+        * in the process address space. They are more limited than
+        * drm_bo_type_device buffers in that they must always
+        * remain cached (as we assume the user pages are mapped cached),
+        * and they are not sharable to other processes through DRM
+        * (although, regular shared memory should still work fine).
+        */
        drm_bo_type_user,
-       drm_bo_type_kernel, /* for initial kernel allocations */
+       /*
+        * drm_bo_type_kernel are buffers that exist solely for use
+        * within the kernel. The pages cannot be mapped into the
+        * process. One obvious use would be for the ring
+        * buffer where user access would not (ideally) be required.
+        */
+       drm_bo_type_kernel,
  };
  
  struct drm_buffer_object {
@@@ -476,21 -545,48 +544,48 @@@ struct drm_bo_driver 
        int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
        int (*init_mem_type) (struct drm_device *dev, uint32_t type,
                              struct drm_mem_type_manager *man);
-        uint32_t(*evict_mask) (struct drm_buffer_object *bo);
+       /*
+        * evict_flags:
+        *
+        * @bo: the buffer object to be evicted
+        *
+        * Return the bo flags for a buffer which is not mapped to the hardware.
+        * These will be placed in proposed_flags so that when the move is
+        * finished, they'll end up in bo->mem.flags
+        */
+       uint64_t(*evict_flags) (struct drm_buffer_object *bo);
+       /*
+        * move:
+        *
+        * @bo: the buffer to move
+        *
+        * @evict: whether this motion is evicting the buffer from
+        * the graphics address space
+        *
+        * @no_wait: whether this should give up and return -EBUSY
+        * if this move would require sleeping
+        *
+        * @new_mem: the new memory region receiving the buffer
+        *
+        * Move a buffer between two memory regions.
+        */
        int (*move) (struct drm_buffer_object *bo,
                     int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
+       /*
+        * ttm_cache_flush
+        */
        void (*ttm_cache_flush)(struct drm_ttm *ttm);
  };
  
  /*
   * buffer objects (drm_bo.c)
   */
 -
  extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
  extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
  extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
  extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
  extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 +extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
  extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
  extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
  extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
@@@ -519,7 -615,7 +614,7 @@@ extern int drm_fence_buffer_objects(str
                                    struct drm_fence_object **used_fence);
  extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
  extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
-                                   enum drm_bo_type type, uint64_t mask,
+                                   enum drm_bo_type type, uint64_t flags,
                                    uint32_t hint, uint32_t page_alignment,
                                    unsigned long buffer_start,
                                    struct drm_buffer_object **bo);
@@@ -534,9 -630,8 +629,8 @@@ extern int drm_bo_clean_mm(struct drm_d
  extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
                          unsigned long p_offset, unsigned long p_size);
  extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
-                                 uint32_t fence_class, uint64_t flags,
-                                 uint64_t mask, uint32_t hint,
-                                 int use_old_fence_class,
+                                 uint64_t flags, uint64_t mask, uint32_t hint,
+                                 uint32_t fence_class, int use_old_fence_class,
                                  struct drm_bo_info_rep *rep,
                                  struct drm_buffer_object **bo_rep);
  extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
  extern int drm_bo_do_validate(struct drm_buffer_object *bo,
                              uint64_t flags, uint64_t mask, uint32_t hint,
                              uint32_t fence_class,
-                             int no_wait,
                              struct drm_bo_info_rep *rep);
  
  /*
@@@ -628,10 -722,6 +721,10 @@@ extern void drm_regs_init(struct drm_re
                                              const void *),
                          void (*reg_destroy)(struct drm_reg *));
  
 +extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
 +                             void **virtual);
 +extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
 +                              void *virtual);
  /*
   * drm_bo_lock.c
   * Simple replacement for the hardware lock on buffer manager init and clean.
diff --combined linux-core/drm_vm.c
@@@ -86,7 -86,7 +86,7 @@@ static __inline__ struct page *drm_do_v
                                                unsigned long address)
  {
        struct drm_file *priv = vma->vm_file->private_data;
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        struct drm_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;
@@@ -189,7 -189,7 +189,7 @@@ static __inline__ struct page *drm_do_v
                return NOPAGE_SIGBUS;
        get_page(page);
  
-       DRM_DEBUG("shm_nopage 0x%lx\n", address);
+       DRM_DEBUG("0x%lx\n", address);
        return page;
  }
  
  static void drm_vm_shm_close(struct vm_area_struct *vma)
  {
        struct drm_file *priv = vma->vm_file->private_data;
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_map *map;
        struct drm_map_list *r_list;
@@@ -286,7 -286,7 +286,7 @@@ static __inline__ struct page *drm_do_v
                                                    unsigned long address)
  {
        struct drm_file *priv = vma->vm_file->private_data;
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
  
        get_page(page);
  
-       DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
+       DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
        return page;
  }
  
@@@ -323,7 -323,7 +323,7 @@@ static __inline__ struct page *drm_do_v
  {
        struct drm_map *map = (struct drm_map *) vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
@@@ -419,7 -419,7 +419,7 @@@ static struct vm_operations_struct drm_
  static void drm_vm_open_locked(struct vm_area_struct *vma)
  {
        struct drm_file *priv = vma->vm_file->private_data;
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *vma_entry;
  
        DRM_DEBUG("0x%08lx,0x%08lx\n",
  static void drm_vm_open(struct vm_area_struct *vma)
  {
        struct drm_file *priv = vma->vm_file->private_data;
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
  
        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(vma);
  static void drm_vm_close(struct vm_area_struct *vma)
  {
        struct drm_file *priv = vma->vm_file->private_data;
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
  
        DRM_DEBUG("0x%08lx,0x%08lx\n",
@@@ -491,7 -491,7 +491,7 @@@ static int drm_mmap_dma(struct file *fi
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;
  
 -      dev = priv->head->dev;
 +      dev = priv->minor->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);
@@@ -556,7 -556,7 +556,7 @@@ EXPORT_SYMBOL(drm_core_get_reg_ofs)
  static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
  {
        struct drm_file *priv = filp->private_data;
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        struct drm_map *map = NULL;
        unsigned long offset = 0;
        struct drm_hash_item *hash;
  int drm_mmap(struct file *filp, struct vm_area_struct *vma)
  {
        struct drm_file *priv = filp->private_data;
 -      struct drm_device *dev = priv->head->dev;
 +      struct drm_device *dev = priv->minor->dev;
        int ret;
  
        mutex_lock(&dev->struct_mutex);
@@@ -751,10 -751,10 +751,10 @@@ static unsigned long drm_bo_vm_nopfn(st
         */
  
        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
-               uint32_t new_mask = bo->mem.mask |
+               uint32_t new_flags = bo->mem.proposed_flags |
                        DRM_BO_FLAG_MAPPABLE |
                        DRM_BO_FLAG_FORCE_MAPPABLE;
-               err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+               err = drm_bo_move_buffer(bo, new_flags, 0, 0);
                if (err) {
                        ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
                        goto out_unlock;
diff --combined linux-core/i810_dma.c
@@@ -113,7 -113,7 +113,7 @@@ static int i810_mmap_buffers(struct fil
        drm_i810_buf_priv_t *buf_priv;
  
        lock_kernel();
 -      dev = priv->head->dev;
 +      dev = priv->minor->dev;
        dev_priv = dev->dev_private;
        buf = dev_priv->mmap_buffer;
        buf_priv = buf->dev_private;
@@@ -141,7 -141,7 +141,7 @@@ static const struct file_operations i81
  
  static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
  {
 -      struct drm_device *dev = file_priv->head->dev;
 +      struct drm_device *dev = file_priv->minor->dev;
        drm_i810_buf_priv_t *buf_priv = buf->dev_private;
        drm_i810_private_t *dev_priv = dev->dev_private;
        const struct file_operations *old_fops;
@@@ -589,7 -589,7 +589,7 @@@ static void i810EmitState(struct drm_de
        drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
        unsigned int dirty = sarea_priv->dirty;
  
-       DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
+       DRM_DEBUG("%x\n", dirty);
  
        if (dirty & I810_UPLOAD_BUFFERS) {
                i810EmitDestVerified(dev, sarea_priv->BufferState);
@@@ -821,8 -821,7 +821,7 @@@ static void i810_dma_dispatch_flip(stru
        int pitch = dev_priv->pitch;
        RING_LOCALS;
  
-       DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
-                 __FUNCTION__,
+       DRM_DEBUG("page=%d pfCurrentPage=%d\n",
                  dev_priv->current_page,
                  dev_priv->sarea_priv->pf_current_page);
  
@@@ -867,8 -866,6 +866,6 @@@ static void i810_dma_quiescent(struct d
        drm_i810_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;
  
- /*    printk("%s\n", __FUNCTION__); */
        i810_kernel_lost_context(dev);
  
        BEGIN_LP_RING(4);
@@@ -888,8 -885,6 +885,6 @@@ static int i810_flush_queue(struct drm_
        int i, ret = 0;
        RING_LOCALS;
  
- /*    printk("%s\n", __FUNCTION__); */
        i810_kernel_lost_context(dev);
  
        BEGIN_LP_RING(2);
@@@ -968,7 -963,7 +963,7 @@@ static int i810_dma_vertex(struct drm_d
  
        LOCK_TEST_WITH_RETURN(dev, file_priv);
  
-       DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
+       DRM_DEBUG("idx %d used %d discard %d\n",
                  vertex->idx, vertex->used, vertex->discard);
  
        if (vertex->idx < 0 || vertex->idx > dma->buf_count)
@@@ -1006,7 -1001,7 +1001,7 @@@ static int i810_clear_bufs(struct drm_d
  static int i810_swap_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
  {
-       DRM_DEBUG("i810_swap_bufs\n");
+       DRM_DEBUG("\n");
  
        LOCK_TEST_WITH_RETURN(dev, file_priv);
  
@@@ -1087,11 -1082,10 +1082,10 @@@ static void i810_dma_dispatch_mc(struc
  
        sarea_priv->dirty = 0x7f;
  
-       DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n", address, used);
+       DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
  
        dev_priv->counter++;
        DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
-       DRM_DEBUG("i810_dma_dispatch_mc\n");
        DRM_DEBUG("start : %lx\n", start);
        DRM_DEBUG("used : %d\n", used);
        DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
@@@ -1197,7 -1191,7 +1191,7 @@@ static void i810_do_init_pageflip(struc
  {
        drm_i810_private_t *dev_priv = dev->dev_private;
  
-       DRM_DEBUG("%s\n", __FUNCTION__);
+       DRM_DEBUG("\n");
        dev_priv->page_flipping = 1;
        dev_priv->current_page = 0;
        dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
@@@ -1207,7 -1201,7 +1201,7 @@@ static int i810_do_cleanup_pageflip(str
  {
        drm_i810_private_t *dev_priv = dev->dev_private;
  
-       DRM_DEBUG("%s\n", __FUNCTION__);
+       DRM_DEBUG("\n");
        if (dev_priv->current_page != 0)
                i810_dma_dispatch_flip(dev);
  
@@@ -1220,7 -1214,7 +1214,7 @@@ static int i810_flip_bufs(struct drm_de
  {
        drm_i810_private_t *dev_priv = dev->dev_private;
  
-       DRM_DEBUG("%s\n", __FUNCTION__);
+       DRM_DEBUG("\n");
  
        LOCK_TEST_WITH_RETURN(dev, file_priv);
  
diff --combined linux-core/i915_buffer.c
@@@ -38,11 -38,11 +38,11 @@@ struct drm_ttm_backend *i915_create_ttm
        return drm_agp_init_ttm(dev);
  }
  
- int i915_fence_types(struct drm_buffer_object *bo,
+ int i915_fence_type(struct drm_buffer_object *bo,
                     uint32_t *fclass,
                     uint32_t *type)
  {
-       if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+       if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
                *type = 3;
        else
                *type = 1;
@@@ -89,7 -89,7 +89,7 @@@ int i915_init_mem_type(struct drm_devic
                man->drm_bus_maptype = _DRM_AGP;
                man->gpu_offset = 0;
                break;
 -      case DRM_BO_MEM_PRIV0:
 +      case DRM_BO_MEM_VRAM:
                if (!(drm_core_has_AGP(dev) && dev->agp)) {
                        DRM_ERROR("AGP is not enabled for memory type %u\n",
                                  (unsigned)type);
                man->drm_bus_maptype = _DRM_AGP;
                man->gpu_offset = 0;
                break;
 +      case DRM_BO_MEM_PRIV0: /* for OS preallocated space */
 +              DRM_ERROR("PRIV0 not used yet.\n");
 +              break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        return 0;
  }
  
- uint32_t i915_evict_mask(struct drm_buffer_object *bo)
+ /*
+  * i915_evict_flags:
+  *
+  * @bo: the buffer object to be evicted
+  *
+  * Return the bo flags for a buffer which is not mapped to the hardware.
+  * These will be placed in proposed_flags so that when the move is
+  * finished, they'll end up in bo->mem.flags
+  */
+ uint64_t i915_evict_flags(struct drm_buffer_object *bo)
  {
        switch (bo->mem.mem_type) {
        case DRM_BO_MEM_LOCAL:
@@@ -133,7 -139,7 +142,7 @@@ static void i915_emit_copy_blit(struct 
  {
        uint32_t cur_pages;
        uint32_t stride = PAGE_SIZE;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        RING_LOCALS;
  
        if (!dev_priv)
@@@ -280,7 -286,18 +289,18 @@@ void i915_flush_ttm(struct drm_ttm *ttm
                return;
  
        DRM_MEMORYBARRIER();
+ #ifdef CONFIG_X86_32
+       /* Hopefully nobody has built an x86-64 processor without clflush */
+       if (!cpu_has_clflush) {
+               wbinvd();
+               DRM_MEMORYBARRIER();
+               return;
+       }
+ #endif
        for (i = ttm->num_pages - 1; i >= 0; i--)
                drm_cache_flush_page(drm_ttm_get_page(ttm, i));
        DRM_MEMORYBARRIER();
  }
diff --combined linux-core/i915_drv.c
@@@ -30,7 -30,6 +30,7 @@@
  #include "drmP.h"
  #include "drm.h"
  #include "i915_drm.h"
 +#include "intel_drv.h"
  #include "i915_drv.h"
  
  #include "drm_pciids.h"
@@@ -53,8 -52,8 +53,8 @@@ static struct drm_fence_driver i915_fen
  #endif
  #ifdef I915_HAVE_BUFFER
  
 -static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
 -static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL};
 +static uint32_t i915_mem_prios[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
 +static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
  
  static struct drm_bo_driver i915_bo_driver = {
        .mem_type_prio = i915_mem_prios,
        .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
        .num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
        .create_ttm_backend_entry = i915_create_ttm_backend_entry,
-       .fence_type = i915_fence_types,
+       .fence_type = i915_fence_type,
        .invalidate_caches = i915_invalidate_caches,
        .init_mem_type = i915_init_mem_type,
-       .evict_mask = i915_evict_mask,
+       .evict_flags = i915_evict_flags,
        .move = i915_move,
        .ttm_cache_flush = i915_flush_ttm,
  };
@@@ -331,7 -330,7 +331,7 @@@ static int i915_suspend(struct drm_devi
        dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
        dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
        dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
-       if (IS_I965GM(dev)) {
+       if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
                dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
                dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
        }
@@@ -537,17 -536,18 +537,17 @@@ static struct drm_driver driver = 
         */
        .driver_features =
            DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
-           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
-           DRIVER_IRQ_VBL2,
+           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
        .load = i915_driver_load,
        .unload = i915_driver_unload,
 -      .firstopen = i915_driver_firstopen,
 -      .lastclose = i915_driver_lastclose,
 -      .preclose = i915_driver_preclose,
 +/*    .lastclose = i915_driver_lastclose,
 +      .preclose = i915_driver_preclose, */
        .suspend = i915_suspend,
        .resume = i915_resume,
        .device_is_agp = i915_driver_device_is_agp,
-       .vblank_wait = i915_driver_vblank_wait,
-       .vblank_wait2 = i915_driver_vblank_wait2,
+       .get_vblank_counter = i915_get_vblank_counter,
+       .enable_vblank = i915_enable_vblank,
+       .disable_vblank = i915_disable_vblank,
        .irq_preinstall = i915_driver_irq_preinstall,
        .irq_postinstall = i915_driver_irq_postinstall,
        .irq_uninstall = i915_driver_irq_uninstall,
        .reclaim_buffers = drm_core_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
 +      .fb_probe = intelfb_probe,
 +      .fb_remove = intelfb_remove,
 +      .fb_resize = intelfb_resize,
        .ioctls = i915_ioctls,
        .fops = {
                .owner = THIS_MODULE,
index 8e2b20b,0000000..5dff189
mode 100644,000000..100644
--- /dev/null
@@@ -1,263 -1,0 +1,263 @@@
- uint32_t radeon_evict_mask(struct drm_buffer_object *bo)
 +/**************************************************************************
 + * 
 + * Copyright 2007 Dave Airlie
 + * All Rights Reserved.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the
 + * "Software"), to deal in the Software without restriction, including
 + * without limitation the rights to use, copy, modify, merge, publish,
 + * distribute, sub license, and/or sell copies of the Software, and to
 + * permit persons to whom the Software is furnished to do so, subject to
 + * the following conditions:
 + * 
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
 + * USE OR OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * The above copyright notice and this permission notice (including the
 + * next paragraph) shall be included in all copies or substantial portions
 + * of the Software.
 + * 
 + * 
 + **************************************************************************/
 +/*
 + * Authors: Dave Airlie <airlied@linux.ie>
 + */
 +
 +#include "drmP.h"
 +#include "radeon_drm.h"
 +#include "radeon_drv.h"
 +
 +struct drm_ttm_backend *radeon_create_ttm_backend_entry(struct drm_device * dev)
 +{
 +      drm_radeon_private_t *dev_priv = dev->dev_private;
 +
 +      if(dev_priv->flags & RADEON_IS_AGP)
 +              return drm_agp_init_ttm(dev);
 +      else
 +              return ati_pcigart_init_ttm(dev, &dev_priv->gart_info, radeon_gart_flush);
 +}
 +
 +int radeon_fence_types(struct drm_buffer_object *bo, uint32_t * class, uint32_t * type)
 +{
 +      *class = 0;
 +      if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
 +              *type = 3;
 +      else
 +              *type = 1;
 +      return 0;
 +}
 +
 +int radeon_invalidate_caches(struct drm_device * dev, uint64_t flags)
 +{
 +      drm_radeon_private_t *dev_priv = dev->dev_private;
 +      RING_LOCALS;
 +
 +      BEGIN_RING(4);
 +      RADEON_FLUSH_CACHE();
 +      RADEON_FLUSH_ZCACHE();
 +      ADVANCE_RING();
 +      return 0;
 +}
 +
-       tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
++uint64_t radeon_evict_flags(struct drm_buffer_object *bo)
 +{
 +      switch (bo->mem.mem_type) {
 +      case DRM_BO_MEM_LOCAL:
 +      case DRM_BO_MEM_TT:
 +              return DRM_BO_FLAG_MEM_LOCAL;
 +      case DRM_BO_MEM_VRAM:
 +              if (bo->mem.num_pages > 128)
 +                      return DRM_BO_MEM_TT;
 +              else
 +                      return DRM_BO_MEM_LOCAL;
 +      default:
 +              return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
 +      }
 +}
 +
 +int radeon_init_mem_type(struct drm_device * dev, uint32_t type,
 +                       struct drm_mem_type_manager * man)
 +{
 +      drm_radeon_private_t *dev_priv = dev->dev_private;
 +
 +      switch (type) {
 +      case DRM_BO_MEM_LOCAL:
 +              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 +                  _DRM_FLAG_MEMTYPE_CACHED;
 +              man->drm_bus_maptype = 0;
 +              break;
 +      case DRM_BO_MEM_VRAM:
 +              man->flags =  _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
 +              man->io_addr = NULL;
 +              man->drm_bus_maptype = _DRM_FRAME_BUFFER;
 +              man->io_offset = drm_get_resource_start(dev, 0);
 +              man->io_size = drm_get_resource_len(dev, 0);
 +              break;
 +      case DRM_BO_MEM_TT:
 +              if (dev_priv->flags & RADEON_IS_AGP) {
 +                      if (!(drm_core_has_AGP(dev) && dev->agp)) {
 +                              DRM_ERROR("AGP is not enabled for memory type %u\n",
 +                                        (unsigned)type);
 +                              return -EINVAL;
 +                      }
 +                      man->io_offset = dev->agp->agp_info.aper_base;
 +                      man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
 +                      man->io_addr = NULL;
 +                      man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 +                              _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
 +                      man->drm_bus_maptype = _DRM_AGP;
 +              } else {
 +                      man->io_offset = dev_priv->gart_vm_start;
 +                      man->io_size = dev_priv->gart_size;
 +                      man->io_addr = NULL;
 +                      man->flags = _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CMA;
 +                      man->drm_bus_maptype = _DRM_SCATTER_GATHER;
 +              }
 +              break;
 +      default:
 +              DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
 +              return -EINVAL;
 +      }
 +      return 0;
 +}
 +
 +static void radeon_emit_copy_blit(struct drm_device * dev,
 +                                uint32_t src_offset,
 +                                uint32_t dst_offset,
 +                                uint32_t pages, int direction)
 +{
 +      uint32_t cur_pages;
 +      uint32_t stride = PAGE_SIZE;
 +      drm_radeon_private_t *dev_priv = dev->dev_private;
 +      uint32_t format, height;
 +      RING_LOCALS;
 +
 +      if (!dev_priv)
 +              return;
 +
 +      /* 32-bit copy format */
 +      format = RADEON_COLOR_FORMAT_ARGB8888;
 +
 +      /* radeon limited to 16k stride */
 +      stride &= 0x3fff;
 +      while(pages > 0) {
 +              cur_pages = pages;
 +              if (cur_pages > 2048)
 +                      cur_pages = 2048;
 +              pages -= cur_pages;
 +
 +              /* needs verification */
 +              BEGIN_RING(7);          
 +              OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
 +              OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
 +                       RADEON_GMC_DST_PITCH_OFFSET_CNTL |
 +                       RADEON_GMC_BRUSH_NONE |
 +                       (format << 8) |
 +                       RADEON_GMC_SRC_DATATYPE_COLOR |
 +                       RADEON_ROP3_S |
 +                       RADEON_DP_SRC_SOURCE_MEMORY |
 +                       RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
 +              if (direction) {
 +                      OUT_RING((stride << 22) | (src_offset >> 10));
 +                      OUT_RING((stride << 22) | (dst_offset >> 10));
 +              } else {
 +                      OUT_RING((stride << 22) | (dst_offset >> 10));
 +                      OUT_RING((stride << 22) | (src_offset >> 10));
 +              }
 +              OUT_RING(0);
 +              OUT_RING(pages); /* x - y */
 +              OUT_RING((stride << 16) | cur_pages);
 +              ADVANCE_RING();
 +      }
 +
 +      BEGIN_RING(2);
 +      RADEON_WAIT_UNTIL_2D_IDLE();
 +      ADVANCE_RING();
 +
 +      return;
 +}
 +
 +static int radeon_move_blit(struct drm_buffer_object * bo,
 +                          int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
 +{
 +      struct drm_bo_mem_reg *old_mem = &bo->mem;
 +      int dir = 0;
 +
 +      if ((old_mem->mem_type == new_mem->mem_type) &&
 +          (new_mem->mm_node->start <
 +           old_mem->mm_node->start + old_mem->mm_node->size)) {
 +              dir = 1;
 +      }
 +
 +      radeon_emit_copy_blit(bo->dev,
 +                            old_mem->mm_node->start << PAGE_SHIFT,
 +                            new_mem->mm_node->start << PAGE_SHIFT,
 +                            new_mem->num_pages, dir);
 +
 +      
 +      return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
 +                                       DRM_FENCE_TYPE_EXE |
 +                                       DRM_RADEON_FENCE_TYPE_RW,
 +                                       DRM_RADEON_FENCE_FLAG_FLUSHED, new_mem);
 +}
 +
 +static int radeon_move_flip(struct drm_buffer_object * bo,
 +                          int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
 +{
 +      struct drm_device *dev = bo->dev;
 +      struct drm_bo_mem_reg tmp_mem;
 +      int ret;
 +
 +      tmp_mem = *new_mem;
 +      tmp_mem.mm_node = NULL;
-       ret = drm_bind_ttm(bo->ttm, &tmp_mem);
++      tmp_mem.flags = DRM_BO_FLAG_MEM_TT |
 +          DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
 +
 +      ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
 +      if (ret)
 +              return ret;
 +
++      ret = drm_ttm_bind(bo->ttm, &tmp_mem);
 +      if (ret)
 +              goto out_cleanup;
 +
 +      ret = radeon_move_blit(bo, 1, no_wait, &tmp_mem);
 +      if (ret)
 +              goto out_cleanup;
 +
 +      ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
 +out_cleanup:
 +      if (tmp_mem.mm_node) {
 +              mutex_lock(&dev->struct_mutex);
 +              if (tmp_mem.mm_node != bo->pinned_node)
 +                      drm_mm_put_block(tmp_mem.mm_node);
 +              tmp_mem.mm_node = NULL;
 +              mutex_unlock(&dev->struct_mutex);
 +      }
 +      return ret;
 +}
 +
 +int radeon_move(struct drm_buffer_object * bo,
 +              int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
 +{
 +      struct drm_bo_mem_reg *old_mem = &bo->mem;
 +
 +      DRM_DEBUG("\n");
 +      if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
 +              return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 +      } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
 +              if (radeon_move_flip(bo, evict, no_wait, new_mem))
 +                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 +      } else {
 +              if (radeon_move_blit(bo, evict, no_wait, new_mem))
 +                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 +      }
 +      return 0;
 +}
 +
diff --combined linux-core/radeon_drv.c
@@@ -56,44 -56,11 +56,43 @@@ static struct pci_device_id pciidlist[
        radeon_PCI_IDS
  };
  
-       .evict_mask = radeon_evict_mask,
 +
 +#ifdef RADEON_HAVE_FENCE
 +static struct drm_fence_driver radeon_fence_driver = {
 +      .num_classes = 1,
 +      .wrap_diff = (1 << 30),
 +      .flush_diff = (1 << 29),
 +      .sequence_mask = 0xffffffffU,
 +      .lazy_capable = 1,
 +      .emit = radeon_fence_emit_sequence,
 +      .poke_flush = radeon_poke_flush,
 +      .has_irq = radeon_fence_has_irq,
 +};
 +#endif
 +#ifdef RADEON_HAVE_BUFFER
 +
 +static uint32_t radeon_mem_prios[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
 +static uint32_t radeon_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
 +
 +static struct drm_bo_driver radeon_bo_driver = {
 +      .mem_type_prio = radeon_mem_prios,
 +      .mem_busy_prio = radeon_busy_prios,
 +      .num_mem_type_prio = sizeof(radeon_mem_prios)/sizeof(uint32_t),
 +      .num_mem_busy_prio = sizeof(radeon_busy_prios)/sizeof(uint32_t),
 +      .create_ttm_backend_entry = radeon_create_ttm_backend_entry,
 +      .fence_type = radeon_fence_types,
 +      .invalidate_caches = radeon_invalidate_caches,
 +      .init_mem_type = radeon_init_mem_type,
++      .evict_flags = radeon_evict_flags,
 +      .move = radeon_move,
 +};
 +#endif
 +
  static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
  static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
-           DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
-           DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
+           DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
        .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
        .load = radeon_driver_load,
        .firstopen = radeon_driver_firstopen,
        .postclose = radeon_driver_postclose,
        .lastclose = radeon_driver_lastclose,
        .unload = radeon_driver_unload,
-       .vblank_wait = radeon_driver_vblank_wait,
-       .vblank_wait2 = radeon_driver_vblank_wait2,
+       .get_vblank_counter = radeon_get_vblank_counter,
+       .enable_vblank = radeon_enable_vblank,
+       .disable_vblank = radeon_disable_vblank,
        .dri_library_name = dri_library_name,
        .irq_preinstall = radeon_driver_irq_preinstall,
        .irq_postinstall = radeon_driver_irq_postinstall,
                .remove = __devexit_p(drm_cleanup_pci),
        },
  
 +#ifdef RADEON_HAVE_FENCE
 +      .fence_driver = &radeon_fence_driver,
 +#endif
 +#ifdef RADEON_HAVE_BUFFER
 +      .bo_driver = &radeon_bo_driver,
 +#endif
 +
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
index 4b52a7d,0000000..d7b0eec
mode 100644,000000..100644
--- /dev/null
@@@ -1,146 -1,0 +1,143 @@@
-       .vblank_wait = NULL,
-       .vblank_wait2 = NULL,
 +/*
 + * Copyright 2007 Jerome Glisse.
 + * All Rights Reserved.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the next
 + * paragraph) shall be included in all copies or substantial portions of the
 + * Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + */
 +/*
 + * Authors:
 + *    Jerome Glisse <glisse@freedesktop.org>
 + */
 +#include "drm_pciids.h"
 +#include "radeon_ms.h"
 +
 +extern struct drm_fence_driver radeon_ms_fence_driver;
 +extern struct drm_bo_driver radeon_ms_bo_driver;
 +extern struct drm_ioctl_desc radeon_ms_ioctls[];
 +extern int radeon_ms_num_ioctls;
 +
 +static int radeon_ms_driver_dri_library_name(struct drm_device * dev,
 +                                           char * buf);
 +static int radeon_ms_driver_probe(struct pci_dev *pdev,
 +                                const struct pci_device_id *ent);
 +
 +static struct pci_device_id pciidlist[] = {
 +      radeon_ms_PCI_IDS
 +};
 +
 +static struct drm_driver driver = {
 +      .load = radeon_ms_driver_load,
 +      .firstopen = NULL,
 +      .open = radeon_ms_driver_open,
 +      .preclose = NULL,
 +      .postclose = NULL,
 +      .lastclose = radeon_ms_driver_lastclose,
 +      .unload = radeon_ms_driver_unload,
 +      .dma_ioctl = radeon_ms_driver_dma_ioctl,
 +      .dma_ready = NULL,
 +      .dma_quiescent = NULL,
 +      .context_ctor = NULL,
 +      .context_dtor = NULL,
 +      .kernel_context_switch = NULL,
 +      .kernel_context_switch_unlock = NULL,
-           DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
-           DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
 +      .dri_library_name = radeon_ms_driver_dri_library_name,
 +      .device_is_agp = NULL,
 +      .irq_handler = radeon_ms_irq_handler,
 +      .irq_preinstall = radeon_ms_irq_preinstall,
 +      .irq_postinstall = radeon_ms_irq_postinstall,
 +      .irq_uninstall = radeon_ms_irq_uninstall,
 +      .reclaim_buffers = drm_core_reclaim_buffers,
 +      .reclaim_buffers_locked = NULL,
 +      .reclaim_buffers_idlelocked = NULL,
 +      .get_map_ofs = drm_core_get_map_ofs,
 +      .get_reg_ofs = drm_core_get_reg_ofs,
 +      .set_version = NULL,
 +      .fb_probe = radeonfb_probe,
 +      .fb_remove = radeonfb_remove,
 +      .fence_driver = &radeon_ms_fence_driver,
 +      .bo_driver = &radeon_ms_bo_driver,
 +      .major = DRIVER_MAJOR,
 +      .minor = DRIVER_MINOR,
 +      .patchlevel = DRIVER_PATCHLEVEL,
 +      .name = DRIVER_NAME,
 +      .desc = DRIVER_DESC,
 +      .date = DRIVER_DATE,
 +      .driver_features =
 +          DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
++          DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
 +      .dev_priv_size = 0, 
 +      .ioctls = radeon_ms_ioctls,
 +      .num_ioctls = 0,
 +      .fops = {
 +              .owner = THIS_MODULE,
 +              .open = drm_open,
 +              .release = drm_release,
 +              .ioctl = drm_ioctl,
 +              .mmap = drm_mmap,
 +              .poll = drm_poll,
 +              .fasync = drm_fasync,
 +#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
 +              .compat_ioctl = radeon_ms_compat_ioctl,
 +#endif
 +              },
 +      .pci_driver = {
 +              .name = DRIVER_NAME,
 +              .id_table = pciidlist,
 +              .probe = radeon_ms_driver_probe,
 +              .remove = __devexit_p(drm_cleanup_pci),
 +      },
 +};
 +
 +static int radeon_ms_driver_probe(struct pci_dev *pdev,
 +                                const struct pci_device_id *ent)
 +{
 +      return drm_get_dev(pdev, ent, &driver);
 +}
 +
 +static int radeon_ms_driver_dri_library_name(struct drm_device * dev,
 +                                           char * buf)
 +{
 +      struct drm_radeon_private *dev_priv = dev->dev_private;
 +      int ret;
 +
 +      switch (dev_priv->family) {
 +      default:
 +              ret = snprintf(buf, PAGE_SIZE, "\n");
 +      }
 +      return ret;
 +}
 +
 +static void __exit radeon_ms_driver_exit(void)
 +{
 +      drm_exit(&driver);
 +}
 +
 +static int __init radeon_ms_driver_init(void)
 +{
 +      driver.num_ioctls = radeon_ms_num_ioctls;
 +      return drm_init(&driver, pciidlist);
 +}
 +
 +module_init(radeon_ms_driver_init);
 +module_exit(radeon_ms_driver_exit);
 +
 +MODULE_AUTHOR(DRIVER_AUTHOR);
 +MODULE_DESCRIPTION(DRIVER_DESC);
 +MODULE_LICENSE("GPL and additional rights");
diff --combined shared-core/drm.h
@@@ -555,6 -555,21 +555,21 @@@ union drm_wait_vblank 
        struct drm_wait_vblank_reply reply;
  };
  
+ enum drm_modeset_ctl_cmd {
+       _DRM_PRE_MODESET = 1,
+       _DRM_POST_MODESET = 2,
+ };
+ /**
+  * DRM_IOCTL_MODESET_CTL ioctl argument type
+  *
+  * \sa drmModesetCtl().
+  */
+ struct drm_modeset_ctl {
+       unsigned long arg;
+       enum drm_modeset_ctl_cmd cmd;
+ };
  /**
   * DRM_IOCTL_AGP_ENABLE ioctl argument type.
   *
@@@ -662,6 -677,10 +677,10 @@@ struct drm_fence_arg 
  #define DRM_BO_FLAG_EXE         (1ULL << 2)
  
  /*
+  * All of the bits related to access mode
+  */
+ #define DRM_BO_MASK_ACCESS    (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
+ /*
   * Status flags. Can be read to determine the actual state of a buffer.
   * Can also be set in the buffer mask before validation.
   */
   */
  #define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)
  
- /* Mask: Make sure the buffer is in cached memory when mapped
+ /* Mask: Make sure the buffer is in cached memory when mapped.  In conjunction
+  * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
+  * with unsnooped PTEs instead of snooped, by using chipset-specific cache
+  * flushing at bind time.  A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
+  * as the eviction to local memory (TTM unbind) on map is just a side effect
+  * to prevent aggressive cache prefetch from the GPU disturbing the cache
+  * management that the DRM is doing.
+  *
   * Flags: Acknowledge.
   * Buffers allocated with this flag should not be used for suballocators
   * This type may have issues on CPUs with over-aggressive caching
  #define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
  /* We can add more of these now with a 64-bit flag type */
  
- /* Memory flag mask */
+ /*
+  * This is a mask covering all of the memory type flags; easier to just
+  * use a single constant than a bunch of | values. It covers
+  * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
+  */
  #define DRM_BO_MASK_MEM         0x00000000FF000000ULL
- #define DRM_BO_MASK_MEMTYPE     0x00000000FF0800A0ULL
+ /*
+  * This adds all of the CPU-mapping options in with the memory
+  * type to label all bits which change how the page gets mapped
+  */
+ #define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
+                                DRM_BO_FLAG_CACHED_MAPPED | \
+                                DRM_BO_FLAG_CACHED | \
+                                DRM_BO_FLAG_MAPPABLE)
+                                
  /* Driver-private flags */
  #define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL
  
- /* Don't block on validate and map */
+ /*
+  * Don't block on validate and map. Instead, return EBUSY.
+  */
  #define DRM_BO_HINT_DONT_BLOCK  0x00000002
- /* Don't place this buffer on the unfenced list.*/
+ /*
+  * Don't place this buffer on the unfenced list. This means
+  * that the buffer will not end up having a fence associated
+  * with it as a result of this operation
+  */
  #define DRM_BO_HINT_DONT_FENCE  0x00000004
+ /*
+  * Sleep while waiting for the operation to complete.
+  * Without this flag, the kernel will, instead, spin
+  * until this operation has completed. I'm not sure
+  * why you would ever want this, so please always
+  * provide DRM_BO_HINT_WAIT_LAZY to any operation
+  * which may block
+  */
  #define DRM_BO_HINT_WAIT_LAZY   0x00000008
+ /*
+  * The client has computed relocations referring to this buffer using the
+  * offset in the presumed_offset field. If that offset ends up matching
+  * where this buffer lands, the kernel is free to skip executing those
+  * relocations
+  */
  #define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
  
  #define DRM_BO_INIT_MAGIC 0xfe769812
@@@ -774,7 -831,7 +831,7 @@@ struct drm_bo_info_req 
  };
  
  struct drm_bo_create_req {
-       uint64_t mask;
+       uint64_t flags;
        uint64_t size;
        uint64_t buffer_start;
        unsigned int hint;
  
  struct drm_bo_info_rep {
        uint64_t flags;
-       uint64_t mask;
+       uint64_t proposed_flags;
        uint64_t size;
        uint64_t offset;
        uint64_t arg_handle;
@@@ -893,140 -950,6 +950,140 @@@ struct drm_mm_init_arg 
        uint64_t p_size;
  };
  
 +/*
 + * Drm mode setting
 + */
 +#define DRM_DISPLAY_INFO_LEN 32
 +#define DRM_OUTPUT_NAME_LEN 32
 +#define DRM_DISPLAY_MODE_LEN 32
 +#define DRM_PROP_NAME_LEN 32
 +
 +#define DRM_MODE_TYPE_BUILTIN (1<<0)
 +#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
 +#define DRM_MODE_TYPE_CRTC_C  ((1<<2) | DRM_MODE_TYPE_BUILTIN)
 +#define DRM_MODE_TYPE_PREFERRED       (1<<3)
 +#define DRM_MODE_TYPE_DEFAULT (1<<4)
 +#define DRM_MODE_TYPE_USERDEF (1<<5)
 +#define DRM_MODE_TYPE_DRIVER  (1<<6)
 +
 +struct drm_mode_modeinfo {
 +      unsigned int clock;
 +      unsigned short hdisplay, hsync_start, hsync_end, htotal, hskew;
 +      unsigned short vdisplay, vsync_start, vsync_end, vtotal, vscan;
 +
 +      unsigned int vrefresh; /* vertical refresh * 1000 */
 +
 +      unsigned int flags;
 +      unsigned int type;
 +      char name[DRM_DISPLAY_MODE_LEN];
 +};
 +
 +struct drm_mode_card_res {
 +      uint64_t fb_id_ptr;
 +      uint64_t crtc_id_ptr;
 +      uint64_t output_id_ptr;
 +      int count_fbs;
 +      int count_crtcs;
 +      int count_outputs;
 +      int min_width, max_width;
 +      int min_height, max_height;
 +};
 +
 +struct drm_mode_crtc {
 +      uint64_t set_outputs_ptr;
 +
 +      unsigned int crtc_id; /**< Id */
 +      unsigned int fb_id; /**< Id of framebuffer */
 +
 +      int x, y; /**< Position on the framebuffer */
 +
 +      int count_outputs;
 +      unsigned int outputs; /**< Outputs that are connected */
 +
 +      int count_possibles;
 +      unsigned int possibles; /**< Outputs that can be connected */
 +      int gamma_size;
 +      int mode_valid;
 +      struct drm_mode_modeinfo mode;
 +};
 +
 +#define DRM_MODE_OUTPUT_NONE 0
 +#define DRM_MODE_OUTPUT_DAC  1
 +#define DRM_MODE_OUTPUT_TMDS 2
 +#define DRM_MODE_OUTPUT_LVDS 3
 +#define DRM_MODE_OUTPUT_TVDAC 4
 +
 +struct drm_mode_get_output {
 +
 +      uint64_t modes_ptr;
 +      uint64_t props_ptr;
 +      uint64_t prop_values_ptr;
 +
 +      int count_modes;
 +      int count_props;
 +      unsigned int output; /**< Id */
 +      unsigned int crtc; /**< Id of crtc */
 +      unsigned int output_type;
 +      unsigned int output_type_id;
 +
 +      unsigned int connection;
 +      unsigned int mm_width, mm_height; /**< HxW in millimeters */
 +      unsigned int subpixel;
 +      int count_crtcs;
 +      int count_clones;
 +      unsigned int crtcs; /**< possible crtc to connect to */
 +      unsigned int clones; /**< list of clones */
 +};
 +
 +#define DRM_MODE_PROP_PENDING (1<<0)
 +#define DRM_MODE_PROP_RANGE (1<<1)
 +#define DRM_MODE_PROP_IMMUTABLE (1<<2)
 +#define DRM_MODE_PROP_ENUM (1<<3) // enumerated type with text strings
 +#define DRM_MODE_PROP_BLOB (1<<4)
 +
 +struct drm_mode_property_enum {
 +      uint64_t value;
 +      unsigned char name[DRM_PROP_NAME_LEN];
 +};
 +
 +struct drm_mode_get_property {
 +      uint64_t values_ptr; /* values and blob lengths */
 +      uint64_t enum_blob_ptr; /* enum and blob id ptrs */
 +
 +      unsigned int prop_id;
 +      unsigned int flags;
 +      unsigned char name[DRM_PROP_NAME_LEN];
 +
 +      int count_values;
 +      int count_enum_blobs;
 +};
 +
 +struct drm_mode_output_set_property {
 +      uint64_t value;
 +      unsigned int prop_id;
 +      unsigned int output_id;
 +};
 +
 +struct drm_mode_get_blob {
 +      uint32_t blob_id;
 +      uint32_t length;
 +      uint64_t data;
 +};
 +
 +struct drm_mode_fb_cmd {
 +        unsigned int buffer_id;
 +        unsigned int width, height;
 +        unsigned int pitch;
 +        unsigned int bpp;
 +        unsigned int handle;
 +      unsigned int depth;
 +};
 +
 +struct drm_mode_mode_cmd {
 +      unsigned int output_id;
 +      struct drm_mode_modeinfo mode;
 +};
 +
  /**
   * \name Ioctls Definitions
   */
  #define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
  #define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
  #define DRM_IOCTL_SET_VERSION         DRM_IOWR(0x07, struct drm_set_version)
+ #define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
  
  #define DRM_IOCTL_SET_UNIQUE          DRM_IOW( 0x10, struct drm_unique)
  #define DRM_IOCTL_AUTH_MAGIC          DRM_IOW( 0x11, struct drm_auth)
  #define DRM_IOCTL_BO_WAIT_IDLE          DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
  #define DRM_IOCTL_BO_VERSION          DRM_IOR(0xd6, struct drm_bo_version_arg)
  
 +#define DRM_IOCTL_MODE_GETRESOURCES     DRM_IOWR(0xA0, struct drm_mode_card_res)
 +#define DRM_IOCTL_MODE_GETCRTC          DRM_IOWR(0xA1, struct drm_mode_crtc)
 +#define DRM_IOCTL_MODE_GETOUTPUT        DRM_IOWR(0xA2, struct drm_mode_get_output)
 +#define DRM_IOCTL_MODE_SETCRTC          DRM_IOWR(0xA3, struct drm_mode_crtc)
 +#define DRM_IOCTL_MODE_ADDFB            DRM_IOWR(0xA4, struct drm_mode_fb_cmd)
 +#define DRM_IOCTL_MODE_RMFB             DRM_IOWR(0xA5, unsigned int)
 +#define DRM_IOCTL_MODE_GETFB            DRM_IOWR(0xA6, struct drm_mode_fb_cmd)
 +
 +#define DRM_IOCTL_MODE_SETPROPERTY     DRM_IOWR(0xA7, struct drm_mode_output_set_property)
 +#define DRM_IOCTL_MODE_GETPROPBLOB     DRM_IOWR(0xA8, struct drm_mode_get_blob)
 +#define DRM_IOCTL_MODE_ATTACHMODE      DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
 +#define DRM_IOCTL_MODE_DETACHMODE      DRM_IOWR(0xAA, struct drm_mode_mode_cmd)
 +
 +#define DRM_IOCTL_MODE_GETPROPERTY     DRM_IOWR(0xAB, struct drm_mode_get_property)
++
  /*@}*/
  
  /**
@@@ -1,7 -1,3 +1,7 @@@
 +[radeon_ms]
 +0x1002 0x4150 CHIP_RV350|RADEON_AGP "ATI Radeon RV350 9600"
 +0x1002 0x5b63 CHIP_RV370|RADEON_PCIE "ATI Radeon RV370 X550"
 +
  [radeon]
  0x1002 0x3150 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X600 M24"
  0x1002 0x3152 CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X300 M24"
  0x1002 0x5e4c CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 SE"
  0x1002 0x5e4d CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700"
  0x1002 0x5e4f CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 SE"
+ 0x1002 0x7100 CHIP_R520|RADEON_NEW_MEMMAP "ATI Radeon X1800"
+ 0x1002 0x7101 CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1800 XT"
+ 0x1002 0x7102 CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1800"
+ 0x1002 0x7103 CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V7200"
+ 0x1002 0x7104 CHIP_R520|RADEON_NEW_MEMMAP "ATI FireGL V7200"
+ 0x1002 0x7105 CHIP_R520|RADEON_NEW_MEMMAP "ATI FireGL V5300"
+ 0x1002 0x7106 CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V7100"
+ 0x1002 0x7108 CHIP_R520|RADEON_NEW_MEMMAP "ATI Radeon X1800"
+ 0x1002 0x7109 CHIP_R520|RADEON_NEW_MEMMAP "ATI Radeon X1800"
+ 0x1002 0x710A CHIP_R520|RADEON_NEW_MEMMAP "ATI Radeon X1800"
+ 0x1002 0x710B CHIP_R520|RADEON_NEW_MEMMAP "ATI Radeon X1800"
+ 0x1002 0x710C CHIP_R520|RADEON_NEW_MEMMAP "ATI Radeon X1800"
+ 0x1002 0x710E CHIP_R520|RADEON_NEW_MEMMAP "ATI FireGL V7300"
+ 0x1002 0x710F CHIP_R520|RADEON_NEW_MEMMAP "ATI FireGL V7350"
+ 0x1002 0x7140 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1600"
+ 0x1002 0x7141 CHIP_RV515|RADEON_NEW_MEMMAP "ATI RV505"
+ 0x1002 0x7142 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300/X1550"
+ 0x1002 0x7143 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1550"
+ 0x1002 0x7144 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI M54-GL"
+ 0x1002 0x7145 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1400"
+ 0x1002 0x7146 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300/X1550"
+ 0x1002 0x7147 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1550 64-bit"
+ 0x1002 0x7149 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1300"
+ 0x1002 0x714A CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1300"
+ 0x1002 0x714B CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1300"
+ 0x1002 0x714C CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1300"
+ 0x1002 0x714D CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300"
+ 0x1002 0x714E CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300"
+ 0x1002 0x714F CHIP_RV515|RADEON_NEW_MEMMAP "ATI RV505"
+ 0x1002 0x7151 CHIP_RV515|RADEON_NEW_MEMMAP "ATI RV505"
+ 0x1002 0x7152 CHIP_RV515|RADEON_NEW_MEMMAP "ATI FireGL V3300"
+ 0x1002 0x7153 CHIP_RV515|RADEON_NEW_MEMMAP "ATI FireGL V3350"
+ 0x1002 0x715E CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300"
+ 0x1002 0x715F CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1550 64-bit"
+ 0x1002 0x7180 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300/X1550"
+ 0x1002 0x7181 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1600"
+ 0x1002 0x7183 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300/X1550"
+ 0x1002 0x7186 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1450"
+ 0x1002 0x7187 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300/X1550"
+ 0x1002 0x7188 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X2300"
+ 0x1002 0x718A CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X2300"
+ 0x1002 0x718B CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1350"
+ 0x1002 0x718C CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1350"
+ 0x1002 0x718D CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1450"
+ 0x1002 0x718F CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300"
+ 0x1002 0x7193 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1550"
+ 0x1002 0x7196 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1350"
+ 0x1002 0x719B CHIP_RV515|RADEON_NEW_MEMMAP "ATI FireMV 2250"
+ 0x1002 0x719F CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1550 64-bit"
+ 0x1002 0x71C0 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1600"
+ 0x1002 0x71C1 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1650"
+ 0x1002 0x71C2 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1600"
+ 0x1002 0x71C3 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1600"
+ 0x1002 0x71C4 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5200"
+ 0x1002 0x71C5 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1600"
+ 0x1002 0x71C6 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1650"
+ 0x1002 0x71C7 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1650"
+ 0x1002 0x71CD CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1600"
+ 0x1002 0x71CE CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1300 XT/X1600 Pro"
+ 0x1002 0x71D2 CHIP_RV530|RADEON_NEW_MEMMAP "ATI FireGL V3400"
+ 0x1002 0x71D4 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5250"
+ 0x1002 0x71D5 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1700"
+ 0x1002 0x71D6 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1700 XT"
+ 0x1002 0x71DA CHIP_RV530|RADEON_NEW_MEMMAP "ATI FireGL V5200"
+ 0x1002 0x71DE CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1700"
+ 0x1002 0x7200 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X2300HD"
+ 0x1002 0x7210 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon HD 2300"
+ 0x1002 0x7211 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon HD 2300"
+ 0x1002 0x7240 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1950"
+ 0x1002 0x7243 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900"
+ 0x1002 0x7244 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1950"
+ 0x1002 0x7245 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900"
+ 0x1002 0x7246 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900"
+ 0x1002 0x7247 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900"
+ 0x1002 0x7248 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900"
+ 0x1002 0x7249 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900"
+ 0x1002 0x724A CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900"
+ 0x1002 0x724B CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900"
+ 0x1002 0x724C CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900"
+ 0x1002 0x724D CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900"
+ 0x1002 0x724E CHIP_R580|RADEON_NEW_MEMMAP "ATI AMD Stream Processor"
+ 0x1002 0x724F CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900"
+ 0x1002 0x7280 CHIP_RV570|RADEON_NEW_MEMMAP "ATI Radeon X1950"
+ 0x1002 0x7281 CHIP_RV560|RADEON_NEW_MEMMAP "ATI RV560"
+ 0x1002 0x7283 CHIP_RV560|RADEON_NEW_MEMMAP "ATI RV560"
+ 0x1002 0x7284 CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1900"
+ 0x1002 0x7287 CHIP_RV560|RADEON_NEW_MEMMAP "ATI RV560"
+ 0x1002 0x7288 CHIP_RV570|RADEON_NEW_MEMMAP "ATI Radeon X1950 GT"
+ 0x1002 0x7289 CHIP_RV570|RADEON_NEW_MEMMAP "ATI RV570"
+ 0x1002 0x728B CHIP_RV570|RADEON_NEW_MEMMAP "ATI RV570"
+ 0x1002 0x728C CHIP_RV570|RADEON_NEW_MEMMAP "ATI FireGL V7400"
+ 0x1002 0x7290 CHIP_RV560|RADEON_NEW_MEMMAP "ATI RV560"
+ 0x1002 0x7291 CHIP_RV560|RADEON_NEW_MEMMAP "ATI Radeon X1650"
+ 0x1002 0x7293 CHIP_RV560|RADEON_NEW_MEMMAP "ATI Radeon X1650"
+ 0x1002 0x7297 CHIP_RV560|RADEON_NEW_MEMMAP "ATI RV560"
  0x1002 0x7834 CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP "ATI Radeon RS350 9000/9100 IGP"
  0x1002 0x7835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon RS350 Mobility IGP"
  
  0x1106 0x3343 0 "VIA P4M890"
  0x1106 0x3230 VIA_DX9_0 "VIA K8M890"
  0x1106 0x3157 VIA_PRO_GROUP_A "VIA CX700"
+ 0x1106 0x3371 VIA_DX9_0 "VIA P4M900 / VN896"
  
  [i810]
  0x8086 0x7121 0 "Intel i810 GMCH"
  0x8086 0x3582 CHIP_I8XX "Intel i852GM/i855GM GMCH"
  0x8086 0x2572 CHIP_I8XX "Intel i865G GMCH"
  0x8086 0x2582 CHIP_I9XX|CHIP_I915 "Intel i915G"
+ 0x8086 0x258a CHIP_I9XX|CHIP_I915 "Intel E7221 (i915)"
  0x8086 0x2592 CHIP_I9XX|CHIP_I915 "Intel i915GM"
  0x8086 0x2772 CHIP_I9XX|CHIP_I915 "Intel i945G"
  0x8086 0x27A2 CHIP_I9XX|CHIP_I915 "Intel i945GM"
  0x8086 0x29C2 CHIP_I9XX|CHIP_I915 "Intel G33"
  0x8086 0x29B2 CHIP_I9XX|CHIP_I915 "Intel Q35"
  0x8086 0x29D2 CHIP_I9XX|CHIP_I915 "Intel Q33"
+ 0x8086 0x2A42 CHIP_I9XX|CHIP_I965 "Intel Integrated Graphics Device"
  
  [imagine]
  0x105d 0x2309 IMAGINE_128 "Imagine 128"
diff --combined shared-core/i915_dma.c
@@@ -38,8 -38,8 +38,8 @@@
   */
  int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
        u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
        int i;
  
@@@ -63,8 -63,8 +63,8 @@@
  
  void i915_kernel_lost_context(struct drm_device * dev)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
  
        ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
        ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
@@@ -73,8 -73,9 +73,8 @@@
                ring->space += ring->Size;
  }
  
 -static int i915_dma_cleanup(struct drm_device * dev)
 +int i915_dma_cleanup(struct drm_device * dev)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
        if (dev->irq)
                drm_irq_uninstall(dev);
  
 -      if (dev_priv->ring.virtual_start) {
 -              drm_core_ioremapfree(&dev_priv->ring.map, dev);
 -              dev_priv->ring.virtual_start = 0;
 -              dev_priv->ring.map.handle = 0;
 -              dev_priv->ring.map.size = 0;
 -      }
 -
 -      if (dev_priv->status_page_dmah) {
 -              drm_pci_free(dev, dev_priv->status_page_dmah);
 -              dev_priv->status_page_dmah = NULL;
 -              /* Need to rewrite hardware status page */
 -              I915_WRITE(0x02080, 0x1ffff000);
 -      }
 -
 -      if (dev_priv->status_gfx_addr) {
 -              dev_priv->status_gfx_addr = 0;
 -              drm_core_ioremapfree(&dev_priv->hws_map, dev);
 -              I915_WRITE(0x02080, 0x1ffff000);
 -      }
 -
        return 0;
  }
  
  static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
  
        dev_priv->sarea = drm_getsarea(dev);
        if (!dev_priv->sarea) {
  
  static int i915_dma_resume(struct drm_device * dev)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
  
        DRM_DEBUG("\n");
  
  static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
  {
 -      drm_i915_init_t *init = data;
 +      struct drm_i915_init *init = data;
        int retcode = 0;
  
        switch (init->func) {
@@@ -315,7 -336,7 +315,7 @@@ static int validate_cmd(int cmd
  static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
                          int dwords)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
        RING_LOCALS;
  
@@@ -356,7 -377,7 +356,7 @@@ static int i915_emit_box(struct drm_dev
                         struct drm_clip_rect __user * boxes,
                         int i, int DR1, int DR4)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_clip_rect box;
        RING_LOCALS;
  
  
  void i915_emit_breadcrumb(struct drm_device *dev)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        RING_LOCALS;
  
        if (++dev_priv->counter > BREADCRUMB_MASK) {
  
  int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t flush_cmd = CMD_MI_FLUSH;
        RING_LOCALS;
  
  
  
  static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 -                                 drm_i915_cmdbuffer_t * cmd)
 +                                 struct drm_i915_cmdbuffer * cmd)
  {
  #ifdef I915_HAVE_FENCE
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
  #endif
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;
  static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_clip_rect __user *boxes = batch->cliprects;
        int nbox = batch->num_cliprects;
        int i = 0, count;
  
  static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        u32 num_pages, current_page, next_page, dspbase;
        int shift = 2 * plane, x, y;
        RING_LOCALS;
  
  void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
  
        DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
  
  static int i915_quiescent(struct drm_device *dev)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
  
        i915_kernel_lost_context(dev);
        return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
@@@ -622,7 -643,7 +622,7 @@@ static int i915_flush_ioctl(struct drm_
  static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            dev_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
  static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 -      drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 +      struct drm_i915_sarea *sarea_priv = (struct drm_i915_sarea *)
            dev_priv->sarea_priv;
 -      drm_i915_cmdbuffer_t *cmdbuf = data;
 +      struct drm_i915_cmdbuffer *cmdbuf = data;
        int ret;
  
        DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
@@@ -718,9 -739,15 +718,15 @@@ int i915_apply_reloc(struct drm_file *f
        unsigned index;
        unsigned long new_cmd_offset;
        u32 val;
-       int ret;
+       int ret, i;
+       int buf_index = -1;
+       for (i = 0; i <= num_buffers; i++)
+               if (buffers[i].buffer)
+                       if (reloc[2] == buffers[i].buffer->base.hash.key)
+                               buf_index = i;
  
-       if (reloc[2] >= num_buffers) {
+       if (buf_index == -1) {
                DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
                return -EINVAL;
        }
         * Short-circuit relocations that were correctly
         * guessed by the client
         */
-       if (buffers[reloc[2]].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
+       if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
                return 0;
  
        new_cmd_offset = reloc[0];
                relocatee->page_offset = (relocatee->offset & PAGE_MASK);
        }
  
-       val = buffers[reloc[2]].buffer->offset;
+       val = buffers[buf_index].buffer->offset;
        index = (reloc[0] - relocatee->page_offset) >> 2;
  
        /* add in validate */
        val = val + reloc[1];
  
        if (DRM_DEBUG_RELOCATION) {
-               if (buffers[reloc[2]].presumed_offset_correct &&
+               if (buffers[buf_index].presumed_offset_correct &&
                    relocatee->data_page[index] != val) {
                        DRM_DEBUG ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
-                                  reloc[0], reloc[1], reloc[2], relocatee->data_page[index], val);
+                                  reloc[0], reloc[1], buf_index, relocatee->data_page[index], val);
                }
        }
        relocatee->data_page[index] = val;
  
  int i915_process_relocs(struct drm_file *file_priv,
                        uint32_t buf_handle,
-                       uint32_t *reloc_buf_handle,
+                       uint32_t __user **reloc_user_ptr,
                        struct i915_relocatee_info *relocatee,
                        struct drm_i915_validate_buffer *buffers,
                        uint32_t num_buffers)
  {
-       struct drm_device *dev = file_priv->minor->dev;
-       struct drm_buffer_object *reloc_list_object;
-       uint32_t cur_handle = *reloc_buf_handle;
-       uint32_t *reloc_page;
-       int ret, reloc_is_iomem, reloc_stride;
-       uint32_t num_relocs, reloc_offset, reloc_end, reloc_page_offset, next_offset, cur_offset;
-       struct drm_bo_kmap_obj reloc_kmap;
-       memset(&reloc_kmap, 0, sizeof(reloc_kmap));
-       mutex_lock(&dev->struct_mutex);
-       reloc_list_object = drm_lookup_buffer_object(file_priv, cur_handle, 1);
-       mutex_unlock(&dev->struct_mutex);
-       if (!reloc_list_object)
-               return -EINVAL;
+       int ret, reloc_stride;
+       uint32_t cur_offset;
+       uint32_t reloc_count;
+       uint32_t reloc_type;
+       uint32_t reloc_buf_size;
+       uint32_t *reloc_buf = NULL;
+       int i;
  
-       ret = drm_bo_kmap(reloc_list_object, 0, 1, &reloc_kmap);
+       /* do a copy from user from the user ptr */
+       ret = get_user(reloc_count, *reloc_user_ptr);
        if (ret) {
                DRM_ERROR("Could not map relocation buffer.\n");
                goto out;
        }
  
-       reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem);
-       num_relocs = reloc_page[0] & 0xffff;
+       ret = get_user(reloc_type, (*reloc_user_ptr)+1);
+       if (ret) {
+               DRM_ERROR("Could not map relocation buffer.\n");
+               goto out;
+       }
  
-       if ((reloc_page[0] >> 16) & 0xffff) {
+       if (reloc_type != 0) {
                DRM_ERROR("Unsupported relocation type requested\n");
+               ret = -EINVAL;
                goto out;
        }
  
-       /* get next relocate buffer handle */
-       *reloc_buf_handle = reloc_page[1];
-       reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */
-       DRM_DEBUG("num relocs is %d, next is %08X\n", num_relocs, reloc_page[1]);
-       reloc_page_offset = 0;
-       reloc_offset = I915_RELOC_HEADER * sizeof(uint32_t);
-       reloc_end = reloc_offset + (num_relocs * reloc_stride);
-       do {
-               next_offset = drm_bo_offset_end(reloc_offset, reloc_end);
+       reloc_buf_size = (I915_RELOC_HEADER + (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
+       reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
+       if (!reloc_buf) {
+               DRM_ERROR("Out of memory for reloc buffer\n");
+               ret = -ENOMEM;
+               goto out;
+       }
  
-               do {
-                       cur_offset = ((reloc_offset + reloc_page_offset) & ~PAGE_MASK) / sizeof(uint32_t);
-                       ret = i915_apply_reloc(file_priv, num_buffers,
-                                        buffers, relocatee, &reloc_page[cur_offset]);
-                       if (ret)
-                               goto out;
+       if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
+               ret = -EFAULT;
+               goto out;
+       }
  
-                       reloc_offset += reloc_stride;
-               } while (reloc_offset < next_offset);
+       /* get next relocate buffer handle */
+       *reloc_user_ptr = (uint32_t *)*(unsigned long *)&reloc_buf[2];
  
-               drm_bo_kunmap(&reloc_kmap);
+       reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */
  
-               reloc_offset = next_offset;
-               if (reloc_offset != reloc_end) {
-                       ret = drm_bo_kmap(reloc_list_object, reloc_offset >> PAGE_SHIFT, 1, &reloc_kmap);
-                       if (ret) {
-                               DRM_ERROR("Could not map relocation buffer.\n");
-                               goto out;
-                       }
+       DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count, *reloc_user_ptr);
  
-                       reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem);
-                       reloc_page_offset = reloc_offset & ~PAGE_MASK;
-               }
+       for (i = 0; i < reloc_count; i++) {
+               cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);
+                 
+               ret = i915_apply_reloc(file_priv, num_buffers, buffers,
+                                      relocatee, reloc_buf + cur_offset);
+               if (ret)
+                       goto out;
+       }
  
-       } while (reloc_offset != reloc_end);
  out:
+       if (reloc_buf)
+               kfree(reloc_buf);
        drm_bo_kunmap(&relocatee->kmap);
        relocatee->data_page = NULL;
  
-       drm_bo_kunmap(&reloc_kmap);
-       mutex_lock(&dev->struct_mutex);
-       drm_bo_usage_deref_locked(&reloc_list_object);
-       mutex_unlock(&dev->struct_mutex);
        return ret;
  }
  
  static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
-                          drm_handle_t buf_reloc_handle,
+                          uint32_t __user *reloc_user_ptr,
                           struct drm_i915_validate_buffer *buffers,
                           uint32_t buf_count)
  {
 -      struct drm_device *dev = file_priv->head->dev;
 +      struct drm_device *dev = file_priv->minor->dev;
        struct i915_relocatee_info relocatee;
        int ret = 0;
        int b;
                goto out_err;
        }
  
-       while (buf_reloc_handle) {
-               ret = i915_process_relocs(file_priv, buf_handle, &buf_reloc_handle, &relocatee, buffers, buf_count);
+       while (reloc_user_ptr) {
+               ret = i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr, &relocatee, buffers, buf_count);
                if (ret) {
                        DRM_ERROR("process relocs failed\n");
                        break;
@@@ -926,9 -938,9 +917,9 @@@ int i915_validate_buffer_list(struct dr
        unsigned long next = 0;
        int ret = 0;
        unsigned buf_count = 0;
 -      struct drm_device *dev = file_priv->head->dev;
 +      struct drm_device *dev = file_priv->minor->dev;
-       uint32_t buf_reloc_handle, buf_handle;
+       uint32_t buf_handle;
+       uint32_t __user *reloc_user_ptr;
  
        do {
                if (buf_count >= *num_buffers) {
                }
  
                buf_handle = req->bo_req.handle;
-               buf_reloc_handle = arg.reloc_handle;
+               reloc_user_ptr = (uint32_t *)(unsigned long)arg.reloc_ptr;
  
-               if (buf_reloc_handle) {
-                       ret = i915_exec_reloc(file_priv, buf_handle, buf_reloc_handle, buffers, buf_count);
+               if (reloc_user_ptr) {
+                       ret = i915_exec_reloc(file_priv, buf_handle, reloc_user_ptr, buffers, buf_count);
                        if (ret)
                                goto out_err;
                        DRM_MEMORYBARRIER();
                }
  
                rep.ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
-                                                req->bo_req.fence_class,
-                                                req->bo_req.flags,
-                                                req->bo_req.mask,
+                                                req->bo_req.flags, req->bo_req.mask,
                                                 req->bo_req.hint,
-                                                0,
+                                                req->bo_req.fence_class, 0,
                                                 &rep.bo_info,
                                                 &buffers[buf_count].buffer);
  
@@@ -1019,11 -1029,11 +1008,11 @@@ out_err
  static int i915_execbuffer(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
                dev_priv->sarea_priv;
        struct drm_i915_execbuffer *exec_buf = data;
 -      struct _drm_i915_batchbuffer *batch = &exec_buf->batch;
 +      struct drm_i915_batchbuffer *batch = &exec_buf->batch;
        struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
        int num_buffers;
        int ret;
        sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
  
        /* fence */
-       ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
+       ret = drm_fence_buffer_objects(dev, NULL, fence_arg->flags, 
+                                      NULL, &fence);
        if (ret)
                goto out_err0;
  
@@@ -1122,14 -1133,14 +1112,14 @@@ out_free
  }
  #endif
  
 -static int i915_do_cleanup_pageflip(struct drm_device * dev)
 +int i915_do_cleanup_pageflip(struct drm_device * dev)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      int i, planes, num_pages;
  
        DRM_DEBUG("\n");
 -
 -      for (i = 0, planes = 0; i < 2; i++)
 +      num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
 +      for (i = 0, planes = 0; i < 2; i++) {
                if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
                        dev_priv->sarea_priv->pf_current_page =
                                (dev_priv->sarea_priv->pf_current_page &
  
                        planes |= 1 << i;
                }
 +      }
  
        if (planes)
                i915_dispatch_flip(dev, planes, 0);
  
  static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
  {
 -      drm_i915_flip_t *param = data;
 +      struct drm_i915_flip *param = data;
  
        DRM_DEBUG("\n");
  
  static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_getparam_t *param = data;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_getparam *param = data;
        int value;
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
  static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
@@@ -1245,14 -1255,14 +1235,14 @@@ static int i915_mmio(struct drm_device 
                     struct drm_file *file_priv)
  {
        uint32_t buf[8];
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_mmio_entry_t *e;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      drm_i915_mmio_entry_t *e;        
        drm_i915_mmio_t *mmio = data;
        void __iomem *base;
        int i;
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
  static int i915_set_status_page(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
        DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
        dev_priv->hw_status_page = dev_priv->hws_map.handle;
  
        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 -      I915_WRITE(0x02080, dev_priv->status_gfx_addr);
 +      I915_WRITE(I915REG_HWS_PGA, dev_priv->status_gfx_addr);
        DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
                        dev_priv->status_gfx_addr);
        DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
        return 0;
  }
  
 -int i915_driver_load(struct drm_device *dev, unsigned long flags)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      unsigned long base, size;
 -      int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
 -
 -      /* i915 has 4 more counters */
 -      dev->counters += 4;
 -      dev->types[6] = _DRM_STAT_IRQ;
 -      dev->types[7] = _DRM_STAT_PRIMARY;
 -      dev->types[8] = _DRM_STAT_SECONDARY;
 -      dev->types[9] = _DRM_STAT_DMA;
 -
 -      dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
 -      if (dev_priv == NULL)
 -              return -ENOMEM;
 -
 -      memset(dev_priv, 0, sizeof(drm_i915_private_t));
 -
 -      dev->dev_private = (void *)dev_priv;
 -
 -      /* Add register map (needed for suspend/resume) */
 -      base = drm_get_resource_start(dev, mmio_bar);
 -      size = drm_get_resource_len(dev, mmio_bar);
 -
 -      ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
 -              _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
 -
 -#ifdef __linux__
 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
 -      intel_init_chipset_flush_compat(dev);
 -#endif
 -#endif
 -
 -      return ret;
 -}
 -
 -int i915_driver_unload(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      if (dev_priv->mmio_map)
 -              drm_rmmap(dev, dev_priv->mmio_map);
 -
 -      drm_free(dev->dev_private, sizeof(drm_i915_private_t),
 -               DRM_MEM_DRIVER);
 -#ifdef __linux__
 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
 -      intel_fini_chipset_flush_compat(dev);
 -#endif
 -#endif
 -      return 0;
 -}
 -
 -void i915_driver_lastclose(struct drm_device * dev)
 -{
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -
 -      if (drm_getsarea(dev) && dev_priv->sarea_priv)
 -              i915_do_cleanup_pageflip(dev);
 -      if (dev_priv->agp_heap)
 -              i915_mem_takedown(&(dev_priv->agp_heap));
 -
 -      i915_dma_cleanup(dev);
 -}
 -
 -void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 -{
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 -}
 -
  struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
@@@ -1368,3 -1450,10 +1358,3 @@@ int i915_driver_device_is_agp(struct dr
        return 1;
  }
  
 -int i915_driver_firstopen(struct drm_device *dev)
 -{
 -#ifdef I915_HAVE_BUFFER
 -      drm_bo_driver_init(dev);
 -#endif
 -      return 0;
 -}
diff --combined shared-core/i915_drm.h
@@@ -39,7 -39,7 +39,7 @@@
                                 * of chars for next/prev indices */
  #define I915_LOG_MIN_TEX_REGION_SIZE 14
  
 -typedef struct _drm_i915_init {
 +typedef struct drm_i915_init {
        enum {
                I915_INIT_DMA = 0x01,
                I915_CLEANUP_DMA = 0x02,
@@@ -63,7 -63,7 +63,7 @@@
        unsigned int chipset;
  } drm_i915_init_t;
  
 -typedef struct _drm_i915_sarea {
 +typedef struct drm_i915_sarea {
        struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
        int last_upload;        /* last time texture was uploaded */
        int last_enqueue;       /* last time a buffer was enqueued */
@@@ -196,7 -196,7 +196,7 @@@ typedef struct drm_i915_flip 
  /* Allow drivers to submit batchbuffers directly to hardware, relying
   * on the security mechanisms provided by hardware.
   */
 -typedef struct _drm_i915_batchbuffer {
 +typedef struct drm_i915_batchbuffer {
        int start;              /* agp offset */
        int used;               /* nr bytes in use */
        int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
  /* As above, but pass a pointer to userspace buffer which can be
   * validated by the kernel prior to sending to hardware.
   */
 -typedef struct _drm_i915_cmdbuffer {
 +typedef struct drm_i915_cmdbuffer {
        char __user *buf;       /* pointer to userspace command buffer */
        int sz;                 /* nr bytes in buf */
        int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
@@@ -329,9 -329,9 +329,9 @@@ typedef struct drm_i915_hws_addr 
  
  /*
   * Relocation header is 4 uint32_ts
-  * 0 - (16-bit relocation type << 16)| 16 bit reloc count
-  * 1 - buffer handle for another list of relocs
-  * 2-3 - spare.
+  * 0 - 32 bit reloc count
+  * 1 - 32-bit relocation type
+  * 2-3 - 64-bit user buffer handle ptr for another list of relocs.
   */
  #define I915_RELOC_HEADER 4
  
   * type 0 relocation has 4-uint32_t stride
   * 0 - offset into buffer
   * 1 - delta to add in
-  * 2 - index into buffer list
+  * 2 - buffer handle
   * 3 - reserved (for optimisations later).
   */
  #define I915_RELOC_TYPE_0 0
  
  struct drm_i915_op_arg {
        uint64_t next;
-       uint32_t reloc_handle;
+       uint64_t reloc_ptr;
        int handled;
        union {
                struct drm_bo_op_req req;
  struct drm_i915_execbuffer {
        uint64_t ops_list;
        uint32_t num_buffers;
 -      struct _drm_i915_batchbuffer batch;
 +      struct drm_i915_batchbuffer batch;
        drm_context_t context; /* for lockless use in the future */
        struct drm_fence_arg fence_arg;
  };
diff --combined shared-core/i915_drv.h
@@@ -71,7 -71,7 +71,7 @@@
  #define I915_MAX_VALIDATE_BUFFERS 4096
  #endif
  
 -typedef struct _drm_i915_ring_buffer {
 +struct drm_i915_ring_buffer {
        int tail_mask;
        unsigned long Start;
        unsigned long End;
@@@ -81,7 -81,7 +81,7 @@@
        int tail;
        int space;
        drm_local_map_t map;
 -} drm_i915_ring_buffer_t;
 +};
  
  struct mem_block {
        struct mem_block *next;
        struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
  };
  
 -typedef struct _drm_i915_vbl_swap {
 +struct drm_i915_vbl_swap {
        struct list_head head;
        drm_drawable_t drw_id;
        unsigned int plane;
        unsigned int sequence;
        int flip;
 -} drm_i915_vbl_swap_t;
 +};
  
 -typedef struct drm_i915_private {
 +struct drm_i915_private {
 +      struct drm_buffer_object *ring_buffer;
        drm_local_map_t *sarea;
        drm_local_map_t *mmio_map;
  
 -      drm_i915_sarea_t *sarea_priv;
 -      drm_i915_ring_buffer_t ring;
 +      unsigned long mmiobase;
 +      unsigned long mmiolen;
 +
 +      struct drm_i915_sarea *sarea_priv;
 +      struct drm_i915_ring_buffer ring;
  
 -      drm_dma_handle_t *status_page_dmah;
 +      struct drm_dma_handle *status_page_dmah;
        void *hw_status_page;
        dma_addr_t dma_status_page;
        uint32_t counter;
        uint32_t irq_enable_reg;
        int irq_enabled;
  
 +      struct workqueue_struct *wq;
 +
  #ifdef I915_HAVE_FENCE
        uint32_t flush_sequence;
        uint32_t flush_flags;
  #endif
  
        DRM_SPINTYPE swaps_lock;
 -      drm_i915_vbl_swap_t vbl_swaps;
 +      struct drm_i915_vbl_swap vbl_swaps;
        unsigned int swaps_pending;
  
 -      /* Register state */
 +      /* LVDS info */
 +      int backlight_duty_cycle;  /* restore backlight to this value */
 +      bool panel_wants_dither;
 +      struct drm_display_mode *panel_fixed_mode;
 +
 +      /* Register state */
        u8 saveLBB;
        u32 saveDSPACNTR;
        u32 saveDSPBCNTR;
        u8 saveDACMASK;
        u8 saveDACDATA[256*3]; /* 256 3-byte colors */
        u8 saveCR[36];
 -} drm_i915_private_t;
 +};
  
  enum intel_chip_family {
        CHIP_I8XX = 0x01,
@@@ -251,7 -240,7 +251,7 @@@ extern int i915_max_ioctl
                                /* i915_dma.c */
  extern void i915_kernel_lost_context(struct drm_device * dev);
  extern int i915_driver_load(struct drm_device *, unsigned long flags);
 -extern int i915_driver_unload(struct drm_device *);
 +extern int i915_driver_unload(struct drm_device *dev);
  extern void i915_driver_lastclose(struct drm_device * dev);
  extern void i915_driver_preclose(struct drm_device *dev,
                                 struct drm_file *file_priv);
@@@ -262,8 -251,6 +262,8 @@@ extern void i915_emit_breadcrumb(struc
  extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync);
  extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush);
  extern int i915_driver_firstopen(struct drm_device *dev);
 +extern int i915_do_cleanup_pageflip(struct drm_device *dev);
 +extern int i915_dma_cleanup(struct drm_device *dev);
  
  /* i915_irq.c */
  extern int i915_irq_emit(struct drm_device *dev, void *data,
  extern int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
  
- extern void i915_driver_wait_next_vblank(struct drm_device *dev, int pipe);
- extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
- extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
  extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
  extern void i915_driver_irq_preinstall(struct drm_device * dev);
- extern void i915_driver_irq_postinstall(struct drm_device * dev);
+ extern int i915_driver_irq_postinstall(struct drm_device * dev);
  extern void i915_driver_irq_uninstall(struct drm_device * dev);
  extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
  extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
  extern int i915_emit_irq(struct drm_device * dev);
- extern void i915_user_irq_on(struct drm_i915_private *dev_priv);
- extern void i915_user_irq_off(struct drm_i915_private *dev_priv);
 +extern void i915_enable_interrupt (struct drm_device *dev);
+ extern int i915_enable_vblank(struct drm_device *dev, int crtc);
+ extern void i915_disable_vblank(struct drm_device *dev, int crtc);
+ extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
  extern int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
 -extern void i915_user_irq_on(drm_i915_private_t *dev_priv);
 -extern void i915_user_irq_off(drm_i915_private_t *dev_priv);
++extern void i915_user_irq_on(struct drm_i915_private *dev_priv);
++extern void i915_user_irq_off(struct drm_i915_private *dev_priv);
  
  /* i915_mem.c */
  extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@@ -318,12 -304,12 +318,12 @@@ extern int i915_fence_has_irq(struct dr
  #ifdef I915_HAVE_BUFFER
  /* i915_buffer.c */
  extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
- extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
-                           uint32_t *type);
+ extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass,
+                          uint32_t *type);
  extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
  extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
                               struct drm_mem_type_manager *man);
- extern uint32_t i915_evict_mask(struct drm_buffer_object *bo);
+ extern uint64_t i915_evict_flags(struct drm_buffer_object *bo);
  extern int i915_move(struct drm_buffer_object *bo, int evict,
                int no_wait, struct drm_bo_mem_reg *new_mem);
  void i915_flush_ttm(struct drm_ttm *ttm);
@@@ -336,12 -322,6 +336,12 @@@ extern void intel_fini_chipset_flush_co
  #endif
  #endif
  
 +
 +/* modesetting */
 +extern void intel_modeset_init(struct drm_device *dev);
 +extern void intel_modeset_cleanup(struct drm_device *dev);
 +
 +
  #define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
  #define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
  #define I915_READ16(reg)      DRM_READ16(dev_priv->mmio_map, (reg))
  
  #define BEGIN_LP_RING(n) do {                         \
        if (I915_VERBOSE)                               \
-               DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n",  \
-                                (n), __FUNCTION__);           \
+               DRM_DEBUG("BEGIN_LP_RING(%d)\n",        \
+                                (n));                  \
        if (dev_priv->ring.space < (n)*4)                      \
                i915_wait_ring(dev, (n)*4, __FUNCTION__);      \
        outcount = 0;                                   \
        I915_WRITE(LP_RING + RING_TAIL, outring);                       \
  } while(0)
  
 +#define MI_NOOP       (0x00 << 23)
 +
  extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
  
 +/*
 + * The Bridge device's PCI config space has information about the
 + * fb aperture size and the amount of pre-reserved memory.
 + */
 +#define INTEL_GMCH_CTRL               0x52
 +#define INTEL_GMCH_ENABLED    0x4
 +#define INTEL_GMCH_MEM_MASK   0x1
 +#define INTEL_GMCH_MEM_64M    0x1
 +#define INTEL_GMCH_MEM_128M   0
 +
 +#define INTEL_855_GMCH_GMS_MASK               (0x7 << 4)
 +#define INTEL_855_GMCH_GMS_DISABLED   (0x0 << 4)
 +#define INTEL_855_GMCH_GMS_STOLEN_1M  (0x1 << 4)
 +#define INTEL_855_GMCH_GMS_STOLEN_4M  (0x2 << 4)
 +#define INTEL_855_GMCH_GMS_STOLEN_8M  (0x3 << 4)
 +#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
 +#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
 +
 +#define INTEL_915G_GMCH_GMS_STOLEN_48M        (0x6 << 4)
 +#define INTEL_915G_GMCH_GMS_STOLEN_64M        (0x7 << 4)
 +
  /* Extended config space */
  #define LBB 0xf4
  
  #define BB1_UNPROTECTED       (0<<0)
  #define BB2_END_ADDR_MASK     (~0x7)
  
 +#define I915REG_HWS_PGA               0x02080
 +
  /* Framebuffer compression */
  #define FBC_CFB_BASE          0x03200 /* 4k page aligned */
  #define FBC_LL_BASE           0x03204 /* 4k page aligned */
  #define I915REG_INT_ENABLE_R  0x020a0
  #define I915REG_INSTPM                0x020c0
  
+ #define PIPEADSL              0x70000
+ #define PIPEBDSL              0x71000
  #define I915REG_PIPEASTAT     0x70024
  #define I915REG_PIPEBSTAT     0x71024
+ /*
+  * The two pipe frame counter registers are not synchronized, so
+  * reading a stable value is somewhat tricky. The following code 
+  * should work:
+  *
+  *  do {
+  *    high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+  *             PIPE_FRAME_HIGH_SHIFT;
+  *    low1 =  ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+  *             PIPE_FRAME_LOW_SHIFT);
+  *    high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+  *             PIPE_FRAME_HIGH_SHIFT);
+  *  } while (high1 != high2);
+  *  frame = (high1 << 8) | low1;
+  */
+ #define PIPEAFRAMEHIGH          0x70040
+ #define PIPEBFRAMEHIGH                0x71040
+ #define PIPE_FRAME_HIGH_MASK    0x0000ffff
+ #define PIPE_FRAME_HIGH_SHIFT   0
+ #define PIPEAFRAMEPIXEL         0x70044
+ #define PIPEBFRAMEPIXEL               0x71044
+ #define PIPE_FRAME_LOW_MASK     0xff000000
+ #define PIPE_FRAME_LOW_SHIFT    24
+ /*
+  * Pixel within the current frame is counted in the PIPEAFRAMEPIXEL register
+  * and is 24 bits wide.
+  */
+ #define PIPE_PIXEL_MASK         0x00ffffff
+ #define PIPE_PIXEL_SHIFT        0
  
  #define I915_VBLANK_INTERRUPT_ENABLE  (1UL<<17)
  #define I915_VBLANK_CLEAR             (1UL<<1)
  
 +#define GPIOA                 0x5010
 +#define GPIOB                 0x5014
 +#define GPIOC                 0x5018
 +#define GPIOD                 0x501c
 +#define GPIOE                 0x5020
 +#define GPIOF                 0x5024
 +#define GPIOG                 0x5028
 +#define GPIOH                 0x502c
 +# define GPIO_CLOCK_DIR_MASK          (1 << 0)
 +# define GPIO_CLOCK_DIR_IN            (0 << 1)
 +# define GPIO_CLOCK_DIR_OUT           (1 << 1)
 +# define GPIO_CLOCK_VAL_MASK          (1 << 2)
 +# define GPIO_CLOCK_VAL_OUT           (1 << 3)
 +# define GPIO_CLOCK_VAL_IN            (1 << 4)
 +# define GPIO_CLOCK_PULLUP_DISABLE    (1 << 5)
 +# define GPIO_DATA_DIR_MASK           (1 << 8)
 +# define GPIO_DATA_DIR_IN             (0 << 9)
 +# define GPIO_DATA_DIR_OUT            (1 << 9)
 +# define GPIO_DATA_VAL_MASK           (1 << 10)
 +# define GPIO_DATA_VAL_OUT            (1 << 11)
 +# define GPIO_DATA_VAL_IN             (1 << 12)
 +# define GPIO_DATA_PULLUP_DISABLE     (1 << 13)
 +
 +/* p317, 319
 + */
 +#define VCLK2_VCO_M        0x6008 /* treat as 16 bit? (includes msbs) */
 +#define VCLK2_VCO_N        0x600a
 +#define VCLK2_VCO_DIV_SEL  0x6012
 +
 +#define VCLK_DIVISOR_VGA0   0x6000
 +#define VCLK_DIVISOR_VGA1   0x6004
 +#define VCLK_POST_DIV     0x6010
 +/** Selects a post divisor of 4 instead of 2. */
 +# define VGA1_PD_P2_DIV_4     (1 << 15)
 +/** Overrides the p2 post divisor field */
 +# define VGA1_PD_P1_DIV_2     (1 << 13)
 +# define VGA1_PD_P1_SHIFT     8
 +/** P1 value is 2 greater than this field */
 +# define VGA1_PD_P1_MASK      (0x1f << 8)
 +/** Selects a post divisor of 4 instead of 2. */
 +# define VGA0_PD_P2_DIV_4     (1 << 7)
 +/** Overrides the p2 post divisor field */
 +# define VGA0_PD_P1_DIV_2     (1 << 5)
 +# define VGA0_PD_P1_SHIFT     0
 +/** P1 value is 2 greater than this field */
 +# define VGA0_PD_P1_MASK      (0x1f << 0)
 +
 +#define POST_DIV_SELECT        0x70
 +#define POST_DIV_1             0x00
 +#define POST_DIV_2             0x10
 +#define POST_DIV_4             0x20
 +#define POST_DIV_8             0x30
 +#define POST_DIV_16            0x40
 +#define POST_DIV_32            0x50
 +#define VCO_LOOP_DIV_BY_4M     0x00
 +#define VCO_LOOP_DIV_BY_16M    0x04
 +
  #define SRX_INDEX             0x3c4
  #define SRX_DATA              0x3c5
  #define SR01                  1
  #define PPCR                  0x61204
  #define PPCR_ON                       (1<<0)
  
 +#define DVOA                  0x61120
 +#define DVOA_ON                       (1<<31)
  #define DVOB                  0x61140
  #define DVOB_ON                       (1<<31)
  #define DVOC                  0x61160
  
  #define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
  #define XY_SRC_COPY_BLT_CMD           ((2<<29)|(0x53<<22)|6)
 +#define XY_MONO_SRC_COPY_IMM_BLT      ((2<<29)|(0x71<<22)|5)
  #define XY_SRC_COPY_BLT_WRITE_ALPHA   (1<<21)
  #define XY_SRC_COPY_BLT_WRITE_RGB     (1<<20)
 +#define   BLT_DEPTH_8                 (0<<24)
 +#define   BLT_DEPTH_16_565            (1<<24)
 +#define   BLT_DEPTH_16_1555           (2<<24)
 +#define   BLT_DEPTH_32                        (3<<24)
 +#define   BLT_ROP_GXCOPY              (0xcc<<16)
  
  #define MI_BATCH_BUFFER               ((0x30<<23)|1)
  #define MI_BATCH_BUFFER_START (0x31<<23)
  #define BACKLIGHT_MODULATION_FREQ_SHIFT               (17)
  
  #define BLC_PWM_CTL2          0x61250
 +
  /**
   * This is the most significant 15 bits of the number of backlight cycles in a
   * complete cycle of the modulated backlight control.
  #define BCLRPAT_B     0x61020
  #define VSYNCSHIFT_B  0x61028
  
+ #define HACTIVE_MASK  0x00000fff
+ #define VTOTAL_MASK   0x00001fff
+ #define VTOTAL_SHIFT  16
+ #define VACTIVE_MASK  0x00000fff
+ #define VBLANK_END_MASK       0x00001fff
+ #define VBLANK_END_SHIFT 16
+ #define VBLANK_START_MASK 0x00001fff
  #define PP_STATUS     0x61200
  # define PP_ON                                        (1 << 31)
  /**
   */
  
  #define SWF0                  0x71410
 +#define SWF1                  0x71414
 +#define SWF2                  0x71418
 +#define SWF3                  0x7141c
 +#define SWF4                  0x71420
 +#define SWF5                  0x71424
 +#define SWF6                  0x71428
  
 -/*
 - * 855 scratch registers.
 - */
  #define SWF10                 0x70410
 -
  #define SWF30                 0x72414
 +#define SWF31                 0x72418
 +#define SWF32                 0x7241c
  
  /*
   * Overlay registers.  These are overlay registers accessed via MMIO.
  #define OGAMC2                        0x3001c
  #define OGAMC1                        0x30020
  #define OGAMC0                        0x30024
 +
  /*
   * Palette registers
   */
  #define IS_I855(dev) ((dev)->pci_device == 0x3582)
  #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
  
- #define IS_I915G(dev) (dev->pci_device == 0x2582)/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G)*/
+ #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
  #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
  #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
  #define IS_I945GM(dev) ((dev)->pci_device == 0x27A2)
                       (dev)->pci_device == 0x2992 || \
                       (dev)->pci_device == 0x29A2 || \
                       (dev)->pci_device == 0x2A02 || \
-                      (dev)->pci_device == 0x2A12)
+                      (dev)->pci_device == 0x2A12 || \
+                      (dev)->pci_device == 0x2A42)
  
  #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
  
+ #define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42)
  #define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||        \
                        (dev)->pci_device == 0x29B2 ||  \
                        (dev)->pci_device == 0x29D2)
                      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
  
  #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
-                       IS_I945GM(dev) || IS_I965GM(dev))
+                       IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
  
  #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
  
diff --combined shared-core/i915_irq.c
  #include "i915_drm.h"
  #include "i915_drv.h"
  
 +#include "intel_drv.h"
 +
  #define USER_INT_FLAG (1<<1)
  #define VSYNC_PIPEB_FLAG (1<<5)
  #define VSYNC_PIPEA_FLAG (1<<7)
 +#define HOTPLUG_FLAG (1 << 17)
  
  #define MAX_NOPID ((u32)~0)
  
   * @dev: DRM device
   * @plane: plane to look for
   *
-  * We need to get the pipe associated with a given plane to correctly perform
-  * vblank driven swapping, and they may not always be equal.  So look up the
-  * pipe associated with @plane here.
+  * The Intel Mesa & 2D drivers call the vblank routines with a plane number
+  * rather than a pipe number, since they may not always be equal.  This routine
+  * maps the given @plane back to a pipe number.
   */
  static int
  i915_get_pipe(struct drm_device *dev, int plane)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
        u32 dspcntr;
  
        dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
  }
  
  /**
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+  * i915_get_plane - return the the plane associated with a given pipe
+  * @dev: DRM device
+  * @pipe: pipe to look for
+  *
+  * The Intel Mesa & 2D drivers call the vblank routines with a plane number
+  * rather than a plane number, since they may not always be equal.  This routine
+  * maps the given @pipe back to a plane number.
+  */
+ static int
+ i915_get_plane(struct drm_device *dev, int pipe)
+ {
+       if (i915_get_pipe(dev, 0) == pipe)
+               return 0;
+       return 1;
+ }
+ /**
+  * i915_pipe_enabled - check if a pipe is enabled
+  * @dev: DRM device
+  * @pipe: pipe to check
+  *
+  * Reading certain registers when the pipe is disabled can hang the chip.
+  * Use this routine to make sure the PLL is running and the pipe is active
+  * before reading such registers if unsure.
+  */
+ static int
+ i915_pipe_enabled(struct drm_device *dev, int pipe)
+ {
++      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
+       if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
+               return 1;
+       return 0;
+ }
+ /**
   * Emit a synchronous flip.
   *
   * This function must be called with the drawable spinlock held.
@@@ -69,8 -104,8 +107,8 @@@ static voi
  i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
                         int plane)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 -      drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 +      struct drm_i915_sarea *sarea_priv = dev_priv->sarea_priv;
        u16 x1, y1, x2, y2;
        int pf_planes = 1 << plane;
  
   */
  static void i915_vblank_tasklet(struct drm_device *dev)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
        struct list_head *list, *tmp, hits, *hit;
        int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages;
-       unsigned counter[2] = { atomic_read(&dev->vbl_received),
-                               atomic_read(&dev->vbl_received2) };
+       unsigned counter[2];
        struct drm_drawable_info *drw;
 -      drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
 +      struct drm_i915_sarea *sarea_priv = dev_priv->sarea_priv;
        u32 cpp = dev_priv->cpp,  offsets[3];
        u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
                                XY_SRC_COPY_BLT_WRITE_ALPHA |
                          (cpp << 23) | (1 << 24);
        RING_LOCALS;
  
+       counter[0] = drm_vblank_count(dev, 0);
+       counter[1] = drm_vblank_count(dev, 1);
        DRM_DEBUG("\n");
  
        INIT_LIST_HEAD(&hits);
  
        /* Find buffer swaps scheduled for this vertical blank */
        list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
 -              drm_i915_vbl_swap_t *vbl_swap =
 -                      list_entry(list, drm_i915_vbl_swap_t, head);
 +              struct drm_i915_vbl_swap *vbl_swap =
 +                      list_entry(list, struct drm_i915_vbl_swap, head);
                int pipe = i915_get_pipe(dev, vbl_swap->plane);
  
                if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
  
                list_del(list);
                dev_priv->swaps_pending--;
+               drm_vblank_put(dev, pipe);
  
                DRM_SPINUNLOCK(&dev_priv->swaps_lock);
                DRM_SPINLOCK(&dev->drw_lock);
                }
  
                list_for_each(hit, &hits) {
 -                      drm_i915_vbl_swap_t *swap_cmp =
 -                              list_entry(hit, drm_i915_vbl_swap_t, head);
 +                      struct drm_i915_vbl_swap *swap_cmp =
 +                              list_entry(hit, struct drm_i915_vbl_swap, head);
                        struct drm_drawable_info *drw_cmp =
                                drm_get_drawable_info(dev, swap_cmp->drw_id);
  
                        lower[0] = lower[1] = sarea_priv->height;
  
                list_for_each(hit, &hits) {
 -                      drm_i915_vbl_swap_t *swap_hit =
 -                              list_entry(hit, drm_i915_vbl_swap_t, head);
 +                      struct drm_i915_vbl_swap *swap_hit =
 +                              list_entry(hit, struct drm_i915_vbl_swap, head);
                        struct drm_clip_rect *rect;
                        int num_rects, plane, front, back;
                        unsigned short top, bottom;
        DRM_SPINUNLOCK(&dev->drw_lock);
  
        list_for_each_safe(hit, tmp, &hits) {
 -              drm_i915_vbl_swap_t *swap_hit =
 -                      list_entry(hit, drm_i915_vbl_swap_t, head);
 +              struct drm_i915_vbl_swap *swap_hit =
 +                      list_entry(hit, struct drm_i915_vbl_swap, head);
  
                list_del(hit);
  
                drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
        }
  }
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ #if 0
+ static int i915_in_vblank(struct drm_device *dev, int pipe)
+ {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       unsigned long pipedsl, vblank, vtotal;
+       unsigned long vbl_start, vbl_end, cur_line;
+       pipedsl = pipe ? PIPEBDSL : PIPEADSL;
+       vblank = pipe ? VBLANK_B : VBLANK_A;
+       vtotal = pipe ? VTOTAL_B : VTOTAL_A;
+       vbl_start = I915_READ(vblank) & VBLANK_START_MASK;
+       vbl_end = (I915_READ(vblank) >> VBLANK_END_SHIFT) & VBLANK_END_MASK;
+       cur_line = I915_READ(pipedsl);
+       if (cur_line >= vbl_start)
+               return 1;
+       return 0;
+ }
+ #endif
+ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
+ {
++      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       unsigned long high_frame;
+       unsigned long low_frame;
+       u32 high1, high2, low, count;
+       int pipe;
+       pipe = i915_get_pipe(dev, plane);
+       high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+       low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+       if (!i915_pipe_enabled(dev, pipe)) {
+           printk(KERN_ERR "trying to get vblank count for disabled "
+                  "pipe %d\n", pipe);
+           return 0;
+       }
+       /*
+        * High & low register fields aren't synchronized, so make sure
+        * we get a low value that's stable across two reads of the high
+        * register.
+        */
+       do {
+               high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+                        PIPE_FRAME_HIGH_SHIFT);
+               low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+                       PIPE_FRAME_LOW_SHIFT);
+               high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+                        PIPE_FRAME_HIGH_SHIFT);
+       } while (high1 != high2);
+       count = (high1 << 8) | low;
+       /*
+        * If we're in the middle of the vblank period, the
+        * above regs won't have been updated yet, so return
+        * an incremented count to stay accurate
+        */
+ #if 0
+       if (i915_in_vblank(dev, pipe))
+               count++;
+ #endif
+       return count;
+ }
  
 +#define HOTPLUG_CMD_CRT 1
 +#define HOTPLUG_CMD_SDVOB 4
 +#define HOTPLUG_CMD_SDVOC 8
 +
 +static struct drm_device *hotplug_dev;
 +static int hotplug_cmd = 0;
 +static spinlock_t hotplug_lock = SPIN_LOCK_UNLOCKED;
 +
 +static void i915_hotplug_crt(struct drm_device *dev)
 +{
 +      struct drm_output *output;
 +      struct intel_output *iout;
 +
 +      mutex_lock(&dev->mode_config.mutex);
 +
 +      /* find the crt output */
 +      list_for_each_entry(output, &dev->mode_config.output_list, head) {
 +              iout = output->driver_private;
 +              if (iout->type == INTEL_OUTPUT_ANALOG)
 +                      break;
 +              else
 +                      iout = 0;
 +      }
 +
 +      if (iout == 0)
 +              goto unlock;
 +
 +      drm_hotplug_stage_two(dev, output);
 +
 +unlock:
 +      mutex_unlock(&dev->mode_config.mutex);
 +}
 +
 +static void i915_hotplug_sdvo(struct drm_device *dev, int sdvoB)
 +{
 +      struct drm_output *output = 0;
 +      enum drm_output_status status;
 +
 +      mutex_lock(&dev->mode_config.mutex);
 +
 +      output = intel_sdvo_find(dev, sdvoB);
 +
 +      if (!output) {
 +              DRM_ERROR("could not find sdvo%s output\n", sdvoB ? "B" : "C");
 +              goto unlock;
 +      }
 +
 +      status = output->funcs->detect(output);
 +
 +      if (status != output_status_connected)
 +              DRM_DEBUG("disconnect or unkown we don't do anything then\n");
 +      else
 +              drm_hotplug_stage_two(dev, output);
 +
 +      /* wierd hw bug, sdvo stop sending interupts */
 +      intel_sdvo_set_hotplug(output, 1);
 +
 +unlock:
 +      mutex_unlock(&dev->mode_config.mutex);
 +}
 +/*
 + * This code is called in a more safe envirmoent to handle the hotplugs.
 + * Add code here for hotplug love to userspace.
 + */
 +static void i915_hotplug_work_func(struct work_struct *work)
 +{
 +      struct drm_device *dev = hotplug_dev;
 +      int crt;
 +      int sdvoB;
 +      int sdvoC;
 +
 +      spin_lock(&hotplug_lock);
 +      crt = hotplug_cmd & HOTPLUG_CMD_CRT;
 +      sdvoB = hotplug_cmd & HOTPLUG_CMD_SDVOB;
 +      sdvoC = hotplug_cmd & HOTPLUG_CMD_SDVOC;
 +      hotplug_cmd = 0;
 +      spin_unlock(&hotplug_lock);
 +
 +      if (crt)
 +              i915_hotplug_crt(dev);
 +
 +      if (sdvoB)
 +              i915_hotplug_sdvo(dev, 1);
 +
 +      if (sdvoC)
 +              i915_hotplug_sdvo(dev, 0);
 +
 +}
 +
 +static int i915_run_hotplug_tasklet(struct drm_device *dev, uint32_t stat)
 +{
 +      static DECLARE_WORK(hotplug, i915_hotplug_work_func);
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      hotplug_dev = dev;
 +
 +      if (stat & CRT_HOTPLUG_INT_STATUS) {
 +              DRM_DEBUG("CRT event\n");
 +
 +              if (stat & CRT_HOTPLUG_MONITOR_MASK) {
 +                      spin_lock(&hotplug_lock);
 +                      hotplug_cmd |= HOTPLUG_CMD_CRT;
 +                      spin_unlock(&hotplug_lock);
 +              } else {
 +                      /* handle crt disconnects */
 +              }
 +      }
 +
 +      if (stat & SDVOB_HOTPLUG_INT_STATUS) {
 +              DRM_DEBUG("sDVOB event\n");
 +
 +              spin_lock(&hotplug_lock);
 +              hotplug_cmd |= HOTPLUG_CMD_SDVOB;
 +              spin_unlock(&hotplug_lock);
 +      }
 +
 +      if (stat & SDVOC_HOTPLUG_INT_STATUS) {
 +              DRM_DEBUG("sDVOC event\n");
 +
 +              spin_lock(&hotplug_lock);
 +              hotplug_cmd |= HOTPLUG_CMD_SDVOC;
 +              spin_unlock(&hotplug_lock);
 +      }
 +
 +      queue_work(dev_priv->wq, &hotplug);
 +
 +      return 0;
 +}
 +
  irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
  {
        struct drm_device *dev = (struct drm_device *) arg;
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 -      u16 temp;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 +      u32 temp = 0;
 +      u32 temp2;
        u32 pipea_stats, pipeb_stats;
  
        pipea_stats = I915_READ(I915REG_PIPEASTAT);
        pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
  
 -      temp = I915_READ16(I915REG_INT_IDENTITY_R);
 +      /* On i8xx hw the IIR and IER are 16bit on i9xx its 32bit */
-       if (IS_I9XX(dev)) {
++      if (IS_I9XX(dev))
 +              temp = I915_READ(I915REG_INT_IDENTITY_R);
-       } else {
++      else
 +              temp = I915_READ16(I915REG_INT_IDENTITY_R);
-       }
 +
 +      temp2 = temp;
 +      temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG);
  
  #if 0
-       DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
 +      /* ugly despamification of pipeb event irq */
 +      if (temp & (0xFFFFFFF ^ ((1 << 5) | (1 << 7)))) {
 +              DRM_DEBUG("IIR %08x\n", temp2);
 +              DRM_DEBUG("MSK %08x\n", dev_priv->irq_enable_reg | USER_INT_FLAG);
 +              DRM_DEBUG("M&I %08x\n", temp);
 +              DRM_DEBUG("HOT %08x\n", I915_READ(PORT_HOTPLUG_STAT));
 +      }
 +#else
 +#if 0
+       DRM_DEBUG("flag=%08x\n", temp);
  #endif
 +#endif
 +
        if (temp == 0)
                return IRQ_NONE;
  
 +      if (IS_I9XX(dev)) {
 +              I915_WRITE(I915REG_INT_IDENTITY_R, temp);
 +              (void) I915_READ(I915REG_INT_IDENTITY_R);
 +      } else {
 +              I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
 +              (void) I915_READ16(I915REG_INT_IDENTITY_R);
 +      }
 +
+       /*
+        * Clear the PIPE(A|B)STAT regs before the IIR otherwise
+        * we may get extra interrupts.
+        */
+       if (temp & VSYNC_PIPEA_FLAG) {
+               drm_handle_vblank(dev, i915_get_plane(dev, 0));
+               I915_WRITE(I915REG_PIPEASTAT,
+                          pipea_stats | I915_VBLANK_INTERRUPT_ENABLE |
+                          I915_VBLANK_CLEAR);
+       }
++
+       if (temp & VSYNC_PIPEB_FLAG) {
+               drm_handle_vblank(dev, i915_get_plane(dev, 1));
+               I915_WRITE(I915REG_PIPEBSTAT,
+                          pipeb_stats | I915_VBLANK_INTERRUPT_ENABLE |
+                          I915_VBLANK_CLEAR);
+       }
+       I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+       (void) I915_READ16(I915REG_INT_IDENTITY_R); /* Flush posted write */
 +      DRM_READMEMORYBARRIER();
 +
+       temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG | VSYNC_PIPEA_FLAG |
+                VSYNC_PIPEB_FLAG);
        dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
  
        if (temp & USER_INT_FLAG) {
        }
  
        if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
-               int vblank_pipe = dev_priv->vblank_pipe;
-               if ((vblank_pipe &
-                    (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
-                   == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
-                       if (temp & VSYNC_PIPEA_FLAG)
-                               atomic_inc(&dev->vbl_received);
-                       if (temp & VSYNC_PIPEB_FLAG)
-                               atomic_inc(&dev->vbl_received2);
-               } else if (((temp & VSYNC_PIPEA_FLAG) &&
-                           (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
-                          ((temp & VSYNC_PIPEB_FLAG) &&
-                           (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
-                       atomic_inc(&dev->vbl_received);
-               DRM_WAKEUP(&dev->vbl_queue);
-               drm_vbl_send_signals(dev);
                if (dev_priv->swaps_pending > 0)
                        drm_locked_tasklet(dev, i915_vblank_tasklet);
-               I915_WRITE(I915REG_PIPEASTAT,
-                       pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
-                       I915_VBLANK_CLEAR);
-               I915_WRITE(I915REG_PIPEBSTAT,
-                       pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
-                       I915_VBLANK_CLEAR);
        }
  
 +      /* for now lest just ack it */
 +      if (temp & (1 << 17)) {
 +              DRM_DEBUG("Hotplug event received\n");
 +
 +              temp2 = I915_READ(PORT_HOTPLUG_STAT);
 +
 +              i915_run_hotplug_tasklet(dev, temp2);
 +
 +              I915_WRITE(PORT_HOTPLUG_STAT,temp2);
 +      }
 +
        return IRQ_HANDLED;
  }
  
  int i915_emit_irq(struct drm_device *dev)
  {
-       
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        RING_LOCALS;
  
        i915_kernel_lost_context(dev);
  
-       DRM_DEBUG("%s\n", __FUNCTION__);
+       DRM_DEBUG("\n");
  
        i915_emit_breadcrumb(dev);
  
        return dev_priv->counter;
  }
  
 -void i915_user_irq_on(drm_i915_private_t *dev_priv)
 +void i915_user_irq_on(struct drm_i915_private *dev_priv)
  {
        DRM_SPINLOCK(&dev_priv->user_irq_lock);
        if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
        DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
  
  }
 -
 -void i915_user_irq_off(drm_i915_private_t *dev_priv)
 +              
 +void i915_user_irq_off(struct drm_i915_private *dev_priv)
  {
        DRM_SPINLOCK(&dev_priv->user_irq_lock);
        if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
  
  static int i915_wait_irq(struct drm_device * dev, int irq_nr)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
        int ret = 0;
  
-       DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
+       DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));
  
        if (READ_BREADCRUMB(dev_priv) >= irq_nr)
        i915_user_irq_off(dev_priv);
  
        if (ret == -EBUSY) {
-               DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
-                         __FUNCTION__,
+               DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
        }
  
        return ret;
  }
  
- static int i915_driver_vblank_do_wait(struct drm_device *dev,
-                                     unsigned int *sequence,
-                                     atomic_t *counter)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned int cur_vblank;
-       int ret = 0;
-       if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
-               return -EINVAL;
-       }
-       DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-                   (((cur_vblank = atomic_read(counter))
-                       - *sequence) <= (1<<23)));
-       *sequence = cur_vblank;
-       return ret;
- }
- void i915_driver_wait_next_vblank(struct drm_device *dev, int pipe)
- {
-       unsigned int seq;
-       seq = pipe ? atomic_read(&dev->vbl_received2) + 1 :
-               atomic_read(&dev->vbl_received) + 1;
-       if (!pipe)
-               i915_driver_vblank_do_wait(dev, &seq, &dev->vbl_received);
-       else
-               i915_driver_vblank_do_wait(dev, &seq, &dev->vbl_received2);
- }
- int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
- {
-       atomic_t *counter;
-       if (i915_get_pipe(dev, 0) == 0)
-               counter = &dev->vbl_received;
-       else
-               counter = &dev->vbl_received2;
-       return i915_driver_vblank_do_wait(dev, sequence, counter);
- }
- int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
- {
-       atomic_t *counter;
-       if (i915_get_pipe(dev, 1) == 0)
-               counter = &dev->vbl_received;
-       else
-               counter = &dev->vbl_received2;
-       return i915_driver_vblank_do_wait(dev, sequence, counter);
- }
  /* Needs the lock as it touches the ring.
   */
  int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_irq_emit_t *emit = data;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_irq_emit *emit = data;
        int result;
  
        LOCK_TEST_WITH_RETURN(dev, file_priv);
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
  int i915_irq_wait(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_irq_wait_t *irqwait = data;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_irq_wait *irqwait = data;
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
        return i915_wait_irq(dev, irqwait->irq_seq);
  }
  
void i915_enable_interrupt (struct drm_device *dev)
int i915_enable_vblank(struct drm_device *dev, int plane)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
-       struct drm_output *o;
+       int pipe = i915_get_pipe(dev, plane);
 -
 +      
-       dev_priv->irq_enable_reg = USER_INT_FLAG; 
-       if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
+       switch (pipe) {
+       case 0:
                dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG;
-       if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
+               break;
+       case 1:
                dev_priv->irq_enable_reg |= VSYNC_PIPEB_FLAG;
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+               break;
+       default:
+               DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
+                         pipe);
+               break;
+       }
+       I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
+       return 0;
+ }
+ void i915_disable_vblank(struct drm_device *dev, int plane)
+ {
 -static void i915_enable_interrupt (struct drm_device *dev)
++      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       int pipe = i915_get_pipe(dev, plane);
+       switch (pipe) {
+       case 0:
+               dev_priv->irq_enable_reg &= ~VSYNC_PIPEA_FLAG;
+               break;
+       case 1:
+               dev_priv->irq_enable_reg &= ~VSYNC_PIPEB_FLAG;
+               break;
+       default:
+               DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
+                         pipe);
+               break;
+       }
+       I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
+ }
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 -      
++void i915_enable_interrupt (struct drm_device *dev)
+ {
++      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
++      struct drm_output *o;
++
+       dev_priv->irq_enable_reg |= USER_INT_FLAG;
  
 -      I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
 +      if (IS_I9XX(dev) && dev->mode_config.num_output) {
 +              dev_priv->irq_enable_reg |= HOTPLUG_FLAG;
 +
 +              /* Activate the CRT */
 +              I915_WRITE(PORT_HOTPLUG_EN, CRT_HOTPLUG_INT_EN);
 +
 +              /* SDVOB */
 +              o = intel_sdvo_find(dev, 1);
 +              if (o && intel_sdvo_supports_hotplug(o)) {
 +                      intel_sdvo_set_hotplug(o, 1);
 +                      I915_WRITE(PORT_HOTPLUG_EN, SDVOB_HOTPLUG_INT_EN);
 +              }
 +
 +              /* SDVOC */
 +              o = intel_sdvo_find(dev, 0);
 +              if (o && intel_sdvo_supports_hotplug(o)) {
 +                      intel_sdvo_set_hotplug(o, 1);
 +                      I915_WRITE(PORT_HOTPLUG_EN, SDVOC_HOTPLUG_INT_EN);
 +              }
 +
 +      }
 +
 +      if (IS_I9XX(dev)) {
 +              I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
 +      } else {
 +              I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
 +      }
 +
 +      DRM_DEBUG("HEN %08x\n",I915_READ(PORT_HOTPLUG_EN));
 +      DRM_DEBUG("HST %08x\n",I915_READ(PORT_HOTPLUG_STAT));
 +      DRM_DEBUG("IER %08x\n",I915_READ(I915REG_INT_ENABLE_R));
 +      DRM_DEBUG("SDB %08x\n",I915_READ(SDVOB));
 +
 +      I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 +
        dev_priv->irq_enabled = 1;
  }
  
  int i915_vblank_pipe_set(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_vblank_pipe_t *pipe = data;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_vblank_pipe *pipe = data;
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
        if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
-               DRM_ERROR("%s called with invalid pipe 0x%x\n",
-                         __FUNCTION__, pipe->pipe);
+               DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
                return -EINVAL;
        }
  
        dev_priv->vblank_pipe = pipe->pipe;
  
-       i915_enable_interrupt (dev);
        return 0;
  }
  
  int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_vblank_pipe_t *pipe = data;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_vblank_pipe *pipe = data;
        u16 flag;
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
  int i915_vblank_swap(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_vblank_swap_t *swap = data;
 -      drm_i915_vbl_swap_t *vbl_swap;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_vblank_swap *swap = data;
 +      struct drm_i915_vbl_swap *vbl_swap;
        unsigned int pipe, seqtype, curseq, plane;
        unsigned long irqflags;
        struct list_head *list;
+       int ret;
  
        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __func__);
  
        DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
  
-       curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
+       drm_update_vblank_count(dev, pipe);
+       curseq = drm_vblank_count(dev, pipe);
  
        if (seqtype == _DRM_VBLANK_RELATIVE)
                swap->sequence += curseq;
        DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
  
        list_for_each(list, &dev_priv->vbl_swaps.head) {
 -              vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
 +              vbl_swap = list_entry(list, struct drm_i915_vbl_swap, head);
  
                if (vbl_swap->drw_id == swap->drawable &&
                    vbl_swap->plane == plane &&
  
        DRM_DEBUG("\n");
  
+       ret = drm_vblank_get(dev, pipe);
+       if (ret) {
+               drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+               return ret;
+       }
        vbl_swap->drw_id = swap->drawable;
        vbl_swap->plane = plane;
        vbl_swap->sequence = swap->sequence;
  
        DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
  
-       list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
+       list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
        dev_priv->swaps_pending++;
  
        DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
  */
  void i915_driver_irq_preinstall(struct drm_device * dev)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
  
        I915_WRITE16(I915REG_HWSTAM, 0xeffe);
 -      I915_WRITE16(I915REG_INT_MASK_R, 0x0);
 -      I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
 +      if (IS_I9XX(dev)) {
 +              I915_WRITE(I915REG_INT_MASK_R, 0x0);
 +              I915_WRITE(I915REG_INT_ENABLE_R, 0x0);
 +      } else {
 +              I915_WRITE16(I915REG_INT_MASK_R, 0x0);
 +              I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
 +      }
 +
  }
  
void i915_driver_irq_postinstall(struct drm_device * dev)
int i915_driver_irq_postinstall(struct drm_device * dev)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       int ret, num_pipes = 2;
  
        DRM_SPININIT(&dev_priv->swaps_lock, "swap");
        INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
  
        DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
        dev_priv->user_irq_refcount = 0;
+       dev_priv->irq_enable_reg = 0;
+       ret = drm_vblank_init(dev, num_pipes);
+       if (ret)
+               return ret;
+       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
  
        i915_enable_interrupt(dev);
        DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
         */
  
        I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
+       return 0;
  }
  
  void i915_driver_irq_uninstall(struct drm_device * dev)
  {
 -      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 -      u16 temp;
 +      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 +      u32 temp;
  
        if (!dev_priv)
                return;
  
        dev_priv->irq_enabled = 0;
 -      I915_WRITE16(I915REG_HWSTAM, 0xffff);
 -      I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
 -      I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
  
 -      temp = I915_READ16(I915REG_INT_IDENTITY_R);
 -      I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
 +
 +      if(IS_I9XX(dev)) {
 +              I915_WRITE(I915REG_HWSTAM, 0xffffffff);
 +              I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
 +              I915_WRITE(I915REG_INT_ENABLE_R, 0x0);
 +
 +              temp = I915_READ(I915REG_INT_IDENTITY_R);
 +              I915_WRITE(I915REG_INT_IDENTITY_R, temp);
 +      } else {
 +              I915_WRITE16(I915REG_HWSTAM, 0xffff);
 +              I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
 +              I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
 +
 +              temp = I915_READ16(I915REG_INT_IDENTITY_R);
 +              I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
 +      }
  }
diff --combined shared-core/i915_mem.c
@@@ -45,8 -45,8 +45,8 @@@
   */
  static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_sarea *sarea_priv = dev_priv->sarea_priv;
        struct drm_tex_region *list;
        unsigned shift, nr;
        unsigned start;
@@@ -256,7 -256,7 +256,7 @@@ void i915_mem_takedown(struct mem_bloc
        *heap = NULL;
  }
  
 -static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
 +static struct mem_block **get_heap(struct drm_i915_private * dev_priv, int region)
  {
        switch (region) {
        case I915_MEM_REGION_AGP:
  int i915_mem_alloc(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_mem_alloc_t *alloc = data;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_mem_alloc *alloc = data;
        struct mem_block *block, **heap;
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
  int i915_mem_free(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_mem_free_t *memfree = data;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_mem_free *memfree = data;
        struct mem_block *block, **heap;
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
  int i915_mem_init_heap(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_mem_init_heap_t *initheap = data;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_mem_init_heap *initheap = data;
        struct mem_block **heap;
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
  int i915_mem_destroy_heap( struct drm_device *dev, void *data,
                           struct drm_file *file_priv )
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_mem_destroy_heap_t *destroyheap = data;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_mem_destroy_heap *destroyheap = data;
        struct mem_block **heap;
  
        if ( !dev_priv ) {
-               DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+               DRM_ERROR( "called with no initialization\n" );
                return -EINVAL;
        }
  
diff --combined shared-core/radeon_cp.c
@@@ -816,19 -816,44 +816,44 @@@ static const u32 R300_cp_microcode[][2
        { 0000000000, 0000000000 },
  };
  
+ static u32 RADEON_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+ {
+       u32 ret;
+       RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
+       ret = RADEON_READ(R520_MC_IND_DATA);
+       RADEON_WRITE(R520_MC_IND_INDEX, 0);
+       return ret;
+ }
  u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
  {
-       return RADEON_READ(RADEON_MC_FB_LOCATION);
+       
+       if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+               return RADEON_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
+       else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+               return RADEON_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
+       else
+               return RADEON_READ(RADEON_MC_FB_LOCATION);
  }
  
  static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
  {
-       RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
+       if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+               RADEON_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
+       else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+               RADEON_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
+       else
+               RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
  }
  
  static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
  {
-       RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
+       if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+               RADEON_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
+       else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+               RADEON_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
+       else
+               RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
  }
  
  static int RADEON_READ_PLL(struct drm_device * dev, int addr)
@@@ -1089,41 -1114,43 +1114,43 @@@ static int radeon_do_engine_reset(struc
  
        radeon_do_pixcache_flush(dev_priv);
  
-       clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
-       mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
-       RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
-                                           RADEON_FORCEON_MCLKA |
-                                           RADEON_FORCEON_MCLKB |
-                                           RADEON_FORCEON_YCLKA |
-                                           RADEON_FORCEON_YCLKB |
-                                           RADEON_FORCEON_MC |
-                                           RADEON_FORCEON_AIC));
-       rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
-       RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
-                                             RADEON_SOFT_RESET_CP |
-                                             RADEON_SOFT_RESET_HI |
-                                             RADEON_SOFT_RESET_SE |
-                                             RADEON_SOFT_RESET_RE |
-                                             RADEON_SOFT_RESET_PP |
-                                             RADEON_SOFT_RESET_E2 |
-                                             RADEON_SOFT_RESET_RB));
-       RADEON_READ(RADEON_RBBM_SOFT_RESET);
-       RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
-                                             ~(RADEON_SOFT_RESET_CP |
-                                               RADEON_SOFT_RESET_HI |
-                                               RADEON_SOFT_RESET_SE |
-                                               RADEON_SOFT_RESET_RE |
-                                               RADEON_SOFT_RESET_PP |
-                                               RADEON_SOFT_RESET_E2 |
-                                               RADEON_SOFT_RESET_RB)));
-       RADEON_READ(RADEON_RBBM_SOFT_RESET);
-       RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
-       RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
-       RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
+       if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
+               clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
+               mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
+               
+               RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
+                                                   RADEON_FORCEON_MCLKA |
+                                                   RADEON_FORCEON_MCLKB |
+                                                   RADEON_FORCEON_YCLKA |
+                                                   RADEON_FORCEON_YCLKB |
+                                                   RADEON_FORCEON_MC |
+                                                   RADEON_FORCEON_AIC));
+               
+               rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
+               
+               RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
+                                                     RADEON_SOFT_RESET_CP |
+                                                     RADEON_SOFT_RESET_HI |
+                                                     RADEON_SOFT_RESET_SE |
+                                                     RADEON_SOFT_RESET_RE |
+                                                     RADEON_SOFT_RESET_PP |
+                                                     RADEON_SOFT_RESET_E2 |
+                                                     RADEON_SOFT_RESET_RB));
+               RADEON_READ(RADEON_RBBM_SOFT_RESET);
+               RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
+                                                     ~(RADEON_SOFT_RESET_CP |
+                                                       RADEON_SOFT_RESET_HI |
+                                                       RADEON_SOFT_RESET_SE |
+                                                       RADEON_SOFT_RESET_RE |
+                                                       RADEON_SOFT_RESET_PP |
+                                                       RADEON_SOFT_RESET_E2 |
+                                                       RADEON_SOFT_RESET_RB)));
+               RADEON_READ(RADEON_RBBM_SOFT_RESET);
+               
+               RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
+               RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
+               RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
+       }
  
        /* Reset the CP ring */
        radeon_do_cp_reset(dev_priv);
@@@ -1405,28 -1432,6 +1432,28 @@@ static void radeon_set_pcigart(drm_rade
        }
  }
  
 +void radeon_gart_flush(struct drm_device *dev)
 +{
 +      drm_radeon_private_t *dev_priv = dev->dev_private;
 +      
 +      if (dev_priv->flags & RADEON_IS_IGPGART) {
 +              RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_FLUSH);
 +              RADEON_WRITE_IGPGART(RADEON_IGPGART_FLUSH, 0x1);
 +              RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_FLUSH);
 +              RADEON_WRITE_IGPGART(RADEON_IGPGART_FLUSH, 0x0);
 +      } else if (dev_priv->flags & RADEON_IS_PCIE) {
 +              u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
 +              tmp |= RADEON_PCIE_TX_GART_INVALIDATE_TLB;
 +              RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
 +              tmp &= ~RADEON_PCIE_TX_GART_INVALIDATE_TLB;
 +              RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
 +      } else {
 +
 +
 +      }
 +
 +}
 +
  static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
  {
        drm_radeon_private_t *dev_priv = dev->dev_private;
@@@ -1881,7 -1886,7 +1908,7 @@@ int radeon_cp_init(struct drm_device *d
        LOCK_TEST_WITH_RETURN(dev, file_priv);
  
        if (init->func == RADEON_INIT_R300_CP)
-               r300_init_reg_flags();
+               r300_init_reg_flags(dev);
  
        switch (init->func) {
        case RADEON_INIT_CP:
@@@ -1903,12 -1908,12 +1930,12 @@@ int radeon_cp_start(struct drm_device *
        LOCK_TEST_WITH_RETURN(dev, file_priv);
  
        if (dev_priv->cp_running) {
-               DRM_DEBUG("%s while CP running\n", __FUNCTION__);
+               DRM_DEBUG("while CP running\n");
                return 0;
        }
        if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
-               DRM_DEBUG("%s called with bogus CP mode (%d)\n",
-                         __FUNCTION__, dev_priv->cp_mode);
+               DRM_DEBUG("called with bogus CP mode (%d)\n",
+                         dev_priv->cp_mode);
                return 0;
        }
  
@@@ -2018,7 -2023,7 +2045,7 @@@ int radeon_cp_reset(struct drm_device *
        LOCK_TEST_WITH_RETURN(dev, file_priv);
  
        if (!dev_priv) {
-               DRM_DEBUG("%s called before init done\n", __FUNCTION__);
+               DRM_DEBUG("called before init done\n");
                return -EINVAL;
        }
  
@@@ -2295,6 -2300,10 +2322,10 @@@ int radeon_driver_load(struct drm_devic
        case CHIP_R350:
        case CHIP_R420:
        case CHIP_RV410:
+       case CHIP_RV515:
+       case CHIP_R520:
+       case CHIP_RV570:
+       case CHIP_R580:
                dev_priv->flags |= RADEON_HAS_HIERZ;
                break;
        default:
@@@ -2338,9 -2347,6 +2369,9 @@@ int radeon_driver_firstopen(struct drm_
        if (ret != 0)
                return ret;
  
 +#ifdef RADEON_HAVE_BUFFER
 +      drm_bo_driver_init(dev);
 +#endif
        return 0;
  }
  
diff --combined shared-core/radeon_drv.h
  #define DRIVER_MINOR          28
  #define DRIVER_PATCHLEVEL     0
  
 +#if defined(__linux__)
 +#define RADEON_HAVE_FENCE
 +#define RADEON_HAVE_BUFFER
 +#endif
 +
  /*
   * Radeon chip families
   */
@@@ -129,6 -124,12 +129,12 @@@ enum radeon_family 
        CHIP_R420,
        CHIP_RV410,
        CHIP_RS400,
+       CHIP_RV515,
+       CHIP_R520,
+       CHIP_RV530,
+       CHIP_RV560,
+       CHIP_RV570,
+       CHIP_R580,
        CHIP_LAST,
  };
  
@@@ -289,9 -290,8 +295,9 @@@ typedef struct drm_radeon_private 
        struct mem_block *fb_heap;
  
        /* SW interrupt */
 -      wait_queue_head_t swi_queue;
 -      atomic_t swi_emitted;
 +      wait_queue_head_t irq_queue;
 +      int counter;
 +
        int vblank_crtc;
        uint32_t irq_enable_reg;
        int irq_enabled;
  
        u32 scratch_ages[5];
  
+       unsigned int crtc_last_cnt;
+       unsigned int crtc2_last_cnt;
        /* starting from here on, data is preserved accross an open */
        uint32_t flags;         /* see radeon_chip_flags */
        unsigned long fb_aper_offset;
@@@ -351,7 -354,6 +360,7 @@@ extern int radeon_cp_resume(struct drm_
  extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
  extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
  extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
 +extern void radeon_gart_flush(struct drm_device *dev);
  extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
  
  extern void radeon_freelist_reset(struct drm_device * dev);
@@@ -371,16 -373,15 +380,16 @@@ extern void radeon_mem_release(struct d
                                /* radeon_irq.c */
  extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
  extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
 +extern int radeon_emit_irq(struct drm_device * dev);
  
  extern void radeon_do_release(struct drm_device * dev);
- extern int radeon_driver_vblank_wait(struct drm_device * dev,
-                                    unsigned int *sequence);
- extern int radeon_driver_vblank_wait2(struct drm_device * dev,
-                                     unsigned int *sequence);
+ extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
+ extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
+ extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
extern void radeon_do_release(struct drm_device * dev);
  extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
  extern void radeon_driver_irq_preinstall(struct drm_device * dev);
- extern void radeon_driver_irq_postinstall(struct drm_device * dev);
+ extern int radeon_driver_irq_postinstall(struct drm_device * dev);
  extern void radeon_driver_irq_uninstall(struct drm_device * dev);
  extern int radeon_vblank_crtc_get(struct drm_device *dev);
  extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
@@@ -399,36 -400,12 +408,36 @@@ extern long radeon_compat_ioctl(struct 
                                         unsigned long arg);
  
  /* r300_cmdbuf.c */
- extern void r300_init_reg_flags(void);
+ extern void r300_init_reg_flags(struct drm_device *dev);
  
  extern int r300_do_cp_cmdbuf(struct drm_device *dev,
                             struct drm_file *file_priv,
                             drm_radeon_kcmd_buffer_t *cmdbuf);
  
- extern uint32_t radeon_evict_mask(struct drm_buffer_object *bo);
 +
 +#ifdef RADEON_HAVE_FENCE
 +/* i915_fence.c */
 +
 +
 +extern void radeon_fence_handler(struct drm_device *dev);
 +extern int radeon_fence_emit_sequence(struct drm_device *dev, uint32_t class,
 +                                    uint32_t flags, uint32_t *sequence, 
 +                                  uint32_t *native_type);
 +extern void radeon_poke_flush(struct drm_device *dev, uint32_t class);
 +extern int radeon_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags);
 +#endif
 +
 +#ifdef RADEON_HAVE_BUFFER
 +/* radeon_buffer.c */
 +extern struct drm_ttm_backend *radeon_create_ttm_backend_entry(struct drm_device *dev);
 +extern int radeon_fence_types(struct drm_buffer_object *bo, uint32_t *class, uint32_t *type);
 +extern int radeon_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
++extern uint64_t radeon_evict_flags(struct drm_buffer_object *bo);
 +extern int radeon_init_mem_type(struct drm_device * dev, uint32_t type,
 +                              struct drm_mem_type_manager * man);
 +extern int radeon_move(struct drm_buffer_object * bo,
 +                     int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
 +#endif
  /* Flags for stats.boxes
   */
  #define RADEON_BOX_DMA_IDLE      0x1
  #define RADEON_IGPGART_ENABLE           0x38
  #define RADEON_IGPGART_UNK_39           0x39
  
+ #define R520_MC_IND_INDEX 0x70
+ #define R520_MC_IND_WR_EN (1<<24)
+ #define R520_MC_IND_DATA  0x74
+ #define RV515_MC_FB_LOCATION 0x01
+ #define RV515_MC_AGP_LOCATION 0x02
+ #define R520_MC_FB_LOCATION 0x04
+ #define R520_MC_AGP_LOCATION 0x05
  
  #define RADEON_MPP_TB_CONFIG          0x01c0
  #define RADEON_MEM_CNTL                       0x0140
                                ? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \
                                : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
  
+ #define RADEON_CRTC_CRNT_FRAME 0x0214
+ #define RADEON_CRTC2_CRNT_FRAME 0x0314
+ #define RADEON_CRTC_STATUS            0x005c
+ #define RADEON_CRTC2_STATUS           0x03fc
  #define RADEON_GEN_INT_CNTL           0x0040
  #     define RADEON_CRTC_VBLANK_MASK          (1 << 0)
  #     define RADEON_CRTC2_VBLANK_MASK         (1 << 9)
@@@ -1117,6 -1109,13 +1141,13 @@@ do {                                                                  
        RADEON_WRITE( RADEON_PCIE_DATA, (val) );                        \
  } while (0)
  
+ #define RADEON_WRITE_MCIND( addr, val )                                       \
+       do {                                                            \
+               RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff));    \
+               RADEON_WRITE(R520_MC_IND_DATA, (val));                  \
+               RADEON_WRITE(R520_MC_IND_INDEX, 0);     \
+       } while (0)
  #define CP_PACKET0( reg, n )                                          \
        (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
  #define CP_PACKET0_TABLE( reg, n )                                    \
@@@ -1227,8 -1226,7 +1258,7 @@@ do {                                                                    
  
  #define BEGIN_RING( n ) do {                                          \
        if ( RADEON_VERBOSE ) {                                         \
-               DRM_INFO( "BEGIN_RING( %d ) in %s\n",                   \
-                          n, __FUNCTION__ );                           \
+               DRM_INFO( "BEGIN_RING( %d )\n", (n));                   \
        }                                                               \
        if ( dev_priv->ring.space <= (n) * sizeof(u32) ) {              \
                COMMIT_RING();                                          \
        write &= mask;                                          \
  } while (0)
  
 +/* Breadcrumb - swi irq */
 +#define READ_BREADCRUMB(dev_priv) RADEON_READ(RADEON_LAST_SWI_REG)
 +
 +static inline int radeon_update_breadcrumb(struct drm_device *dev)
 +{
 +      drm_radeon_private_t *dev_priv = dev->dev_private;
 +
 +      dev_priv->sarea_priv->last_fence = ++dev_priv->counter;
 +
 +      if (dev_priv->counter > 0x7FFFFFFFUL)
 +              dev_priv->sarea_priv->last_fence = dev_priv->counter = 1;
 +
 +      return dev_priv->counter;
 +}
 +
  #endif                                /* __RADEON_DRV_H__ */
diff --combined shared-core/radeon_irq.c
  #include "radeon_drm.h"
  #include "radeon_drv.h"
  
- static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv,
-                                             u32 mask)
+ static void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
  {
-       u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask;
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       if (state)
+               dev_priv->irq_enable_reg |= mask;
+       else
+               dev_priv->irq_enable_reg &= ~mask;
+       RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+ }
+ int radeon_enable_vblank(struct drm_device *dev, int crtc)
+ {
+       switch (crtc) {
+       case 0:
+               radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
+               break;
+       case 1:
+               radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
+               break;
+       default:
+               DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+                         crtc);
+               return EINVAL;
+       }
+       return 0;
+ }
+ void radeon_disable_vblank(struct drm_device *dev, int crtc)
+ {
+       switch (crtc) {
+       case 0:
+               radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
+               break;
+       case 1:
+               radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
+               break;
+       default:
+               DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+                         crtc);
+               break;
+       }
+ }
+ static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv)
+ {
+       u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) &
+               (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
+                RADEON_CRTC2_VBLANK_STAT);
        if (irqs)
                RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
        return irqs;
  }
  
@@@ -72,53 -121,33 +121,35 @@@ irqreturn_t radeon_driver_irq_handler(D
        /* Only consider the bits we're interested in - others could be used
         * outside the DRM
         */
-       stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
-                                                 RADEON_CRTC_VBLANK_STAT |
-                                                 RADEON_CRTC2_VBLANK_STAT));
+       stat = radeon_acknowledge_irqs(dev_priv);
        if (!stat)
                return IRQ_NONE;
  
        stat &= dev_priv->irq_enable_reg;
  
        /* SW interrupt */
 -      if (stat & RADEON_SW_INT_TEST)
 -              DRM_WAKEUP(&dev_priv->swi_queue);
 -
 +      if (stat & RADEON_SW_INT_TEST) {
 +              DRM_WAKEUP(&dev_priv->irq_queue);
 +#ifdef RADEON_HAVE_FENCE
 +              radeon_fence_handler(dev);
 +#endif
 +      }
        /* VBLANK interrupt */
-       if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) {
-               int vblank_crtc = dev_priv->vblank_crtc;
-               if ((vblank_crtc &
-                    (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
-                   (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
-                       if (stat & RADEON_CRTC_VBLANK_STAT)
-                               atomic_inc(&dev->vbl_received);
-                       if (stat & RADEON_CRTC2_VBLANK_STAT)
-                               atomic_inc(&dev->vbl_received2);
-               } else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
-                          (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
-                          ((stat & RADEON_CRTC2_VBLANK_STAT) &&
-                           (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
-                       atomic_inc(&dev->vbl_received);
-               DRM_WAKEUP(&dev->vbl_queue);
-               drm_vbl_send_signals(dev);
-       }
+       if (stat & RADEON_CRTC_VBLANK_STAT)
+               drm_handle_vblank(dev, 0);
+       if (stat & RADEON_CRTC2_VBLANK_STAT)
+               drm_handle_vblank(dev, 1);
  
        return IRQ_HANDLED;
  }
  
 -static int radeon_emit_irq(struct drm_device * dev)
 +int radeon_emit_irq(struct drm_device * dev)
  {
        drm_radeon_private_t *dev_priv = dev->dev_private;
        unsigned int ret;
        RING_LOCALS;
  
 -      atomic_inc(&dev_priv->swi_emitted);
 -      ret = atomic_read(&dev_priv->swi_emitted);
 +      ret = radeon_update_breadcrumb(dev);
  
        BEGIN_RING(4);
        OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
@@@ -135,65 -164,38 +166,38 @@@ static int radeon_wait_irq(struct drm_d
            (drm_radeon_private_t *) dev->dev_private;
        int ret = 0;
  
 -      if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
 +      if (READ_BREADCRUMB(dev_priv) >= swi_nr)
                return 0;
  
        dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
  
 -      DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
 -                  RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
 +      DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
 +                  READ_BREADCRUMB(dev_priv) >= swi_nr);
  
        return ret;
  }
  
- static int radeon_driver_vblank_do_wait(struct drm_device * dev,
-                                       unsigned int *sequence, int crtc)
+ u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
  {
-       drm_radeon_private_t *dev_priv =
-           (drm_radeon_private_t *) dev->dev_private;
-       unsigned int cur_vblank;
-       int ret = 0;
-       int ack = 0;
-       atomic_t *counter;
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       u32 crtc_cnt_reg, crtc_status_reg;
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
-       if (crtc == DRM_RADEON_VBLANK_CRTC1) {
-               counter = &dev->vbl_received;
-               ack |= RADEON_CRTC_VBLANK_STAT;
-       } else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
-               counter = &dev->vbl_received2;
-               ack |= RADEON_CRTC2_VBLANK_STAT;
-       } else
+       if (crtc == 0) {
+               crtc_cnt_reg = RADEON_CRTC_CRNT_FRAME;
+               crtc_status_reg = RADEON_CRTC_STATUS;
+       } else if (crtc == 1) {
+               crtc_cnt_reg = RADEON_CRTC2_CRNT_FRAME;
+               crtc_status_reg = RADEON_CRTC2_STATUS;
+       } else {
                return -EINVAL;
+       }
  
-       radeon_acknowledge_irqs(dev_priv, ack);
-       dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
-       /* Assume that the user has missed the current sequence number
-        * by about a day rather than she wants to wait for years
-        * using vertical blanks...
-        */
-       DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-                   (((cur_vblank = atomic_read(counter))
-                     - *sequence) <= (1 << 23)));
-       *sequence = cur_vblank;
-       return ret;
- }
- int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
- {
-       return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
- }
- int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
- {
-       return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
+       return RADEON_READ(crtc_cnt_reg) + (RADEON_READ(crtc_status_reg) & 1);
  }
  
  /* Needs the lock as it touches the ring.
@@@ -207,7 -209,7 +211,7 @@@ int radeon_irq_emit(struct drm_device *
        LOCK_TEST_WITH_RETURN(dev, file_priv);
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
@@@ -229,28 -231,13 +233,13 @@@ int radeon_irq_wait(struct drm_device *
        drm_radeon_irq_wait_t *irqwait = data;
  
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
  
        return radeon_wait_irq(dev, irqwait->irq_seq);
  }
  
- static void radeon_enable_interrupt(struct drm_device *dev)
- {
-       drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
-       dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
-       if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
-               dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
-       if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
-               dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
-       RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
-       dev_priv->irq_enabled = 1;
- }
  /* drm_dma.h hooks
  */
  void radeon_driver_irq_preinstall(struct drm_device * dev)
        RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
  
        /* Clear bits if they're already high */
-       radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
-                                          RADEON_CRTC_VBLANK_STAT |
-                                          RADEON_CRTC2_VBLANK_STAT));
+       radeon_acknowledge_irqs(dev_priv);
  }
  
- void radeon_driver_irq_postinstall(struct drm_device * dev)
+ int radeon_driver_irq_postinstall(struct drm_device * dev)
  {
        drm_radeon_private_t *dev_priv =
            (drm_radeon_private_t *) dev->dev_private;
+       int ret;
  
 -      atomic_set(&dev_priv->swi_emitted, 0);
 -      DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
 +      dev_priv->counter = 0;
 +      DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
  
-       radeon_enable_interrupt(dev);
+       ret = drm_vblank_init(dev, 2);
+       if (ret)
+               return ret;
+       dev->max_vblank_count = 0x001fffff;
+       radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
+       return 0;
  }
  
  void radeon_driver_irq_uninstall(struct drm_device * dev)
@@@ -317,6 -311,5 +313,5 @@@ int radeon_vblank_crtc_set(struct drm_d
                return -EINVAL;
        }
        dev_priv->vblank_crtc = (unsigned int)value;
-       radeon_enable_interrupt(dev);
        return 0;
  }
diff --combined shared-core/radeon_ms.h
index a784882,0000000..6653383
mode 100644,000000..100644
--- /dev/null
@@@ -1,607 -1,0 +1,607 @@@
- uint32_t radeon_ms_evict_mask(struct drm_buffer_object *bo);
 +/*
 + * Copyright 2007 Jérôme Glisse
 + * Copyright 2007 Dave Airlie
 + * Copyright 2007 Alex Deucher
 + * All Rights Reserved.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the next
 + * paragraph) shall be included in all copies or substantial portions of the
 + * Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + */
 +/*
 + * Authors:
 + *    Jérôme Glisse <glisse@freedesktop.org>
 + */
 +#ifndef __RADEON_MS_H__
 +#define __RADEON_MS_H__
 +
 +#include "radeon_ms_drv.h"
 +#include "radeon_ms_reg.h"
 +#include "radeon_ms_drm.h"
 +#include "radeon_ms_rom.h"
 +#include "radeon_ms_properties.h"
 +
 +#define DRIVER_AUTHOR      "Jerome Glisse, Dave Airlie,  Gareth Hughes, "\
 +                         "Keith Whitwell, others."
 +#define DRIVER_NAME        "radeon_ms"
 +#define DRIVER_DESC        "radeon kernel modesetting"
 +#define DRIVER_DATE        "20071108"
 +#define DRIVER_MAJOR        1
 +#define DRIVER_MINOR        0
 +#define DRIVER_PATCHLEVEL   0
 +
 +enum radeon_bus_type {
 +      RADEON_PCI = 0x10000,
 +      RADEON_AGP = 0x20000,
 +      RADEON_PCIE = 0x30000,
 +};
 +
 +enum radeon_family {
 +      CHIP_R100,
 +      CHIP_RV100,
 +      CHIP_RS100,
 +      CHIP_RV200,
 +      CHIP_RS200,
 +      CHIP_R200,
 +      CHIP_RV250,
 +      CHIP_RS300,
 +      CHIP_RV280,
 +      CHIP_R300,
 +      CHIP_R350,
 +      CHIP_R360,
 +      CHIP_RV350,
 +      CHIP_RV370,
 +      CHIP_RV380,
 +      CHIP_RS400,
 +      CHIP_RV410,
 +      CHIP_R420,
 +      CHIP_R430,
 +      CHIP_R480,
 +      CHIP_LAST,
 +};
 +
 +enum radeon_monitor_type {
 +      MT_UNKNOWN = -1,
 +      MT_NONE    = 0,
 +      MT_CRT     = 1,
 +      MT_LCD     = 2,
 +      MT_DFP     = 3,
 +      MT_CTV     = 4,
 +      MT_STV     = 5
 +};
 +
 +enum radeon_connector_type {
 +      CONNECTOR_NONE,
 +      CONNECTOR_PROPRIETARY,
 +      CONNECTOR_VGA,
 +      CONNECTOR_DVI_I,
 +      CONNECTOR_DVI_D,
 +      CONNECTOR_CTV,
 +      CONNECTOR_STV,
 +      CONNECTOR_UNSUPPORTED
 +};
 +
 +enum radeon_output_type {
 +      OUTPUT_NONE,
 +      OUTPUT_DAC1,
 +      OUTPUT_DAC2,
 +      OUTPUT_TMDS,
 +      OUTPUT_LVDS
 +};
 +
 +struct radeon_state;
 +
 +struct radeon_ms_crtc {
 +      int             crtc;
 +      uint16_t        lut_r[256];
 +      uint16_t        lut_g[256];
 +      uint16_t        lut_b[256];
 +};
 +
 +struct radeon_ms_i2c {
 +      struct drm_device           *drm_dev;
 +      uint32_t                    reg;
 +      struct i2c_adapter          adapter;
 +      struct i2c_algo_bit_data    algo;
 +};
 +
 +struct radeon_ms_connector {
 +      struct radeon_ms_i2c    *i2c;
 +      struct edid             *edid;
 +      struct drm_output       *output;
 +      int                     type;
 +      int                     monitor_type;
 +      int                     crtc;
 +      uint32_t                i2c_reg;
 +      char                    outputs[RADEON_MAX_OUTPUTS];
 +      char                    name[32];
 +};
 +
 +struct radeon_ms_output {
 +      int                         type;
 +      struct drm_device           *dev;
 +      struct radeon_ms_connector  *connector;
 +      int (*initialize)(struct radeon_ms_output *output);
 +      enum drm_output_status (*detect)(struct radeon_ms_output *output);
 +      void (*dpms)(struct radeon_ms_output *output, int mode);
 +      int (*get_modes)(struct radeon_ms_output *output);
 +      bool (*mode_fixup)(struct radeon_ms_output *output,
 +                      struct drm_display_mode *mode,
 +                      struct drm_display_mode *adjusted_mode);
 +      int (*mode_set)(struct radeon_ms_output *output,
 +                      struct drm_display_mode *mode,
 +                      struct drm_display_mode *adjusted_mode);
 +      void (*restore)(struct radeon_ms_output *output,
 +                      struct radeon_state *state);
 +      void (*save)(struct radeon_ms_output *output,
 +                      struct radeon_state *state);
 +};
 +
 +struct radeon_state {
 +      /* memory */
 +      uint32_t        config_aper_0_base;
 +      uint32_t        config_aper_1_base;
 +      uint32_t        config_aper_size;
 +      uint32_t        mc_fb_location;
 +      uint32_t        display_base_addr;
 +      /* irq */
 +      uint32_t        gen_int_cntl;
 +      /* pci */
 +      uint32_t        aic_ctrl;
 +      uint32_t        aic_pt_base;
 +      uint32_t        aic_pt_base_lo;
 +      uint32_t        aic_pt_base_hi;
 +      uint32_t        aic_lo_addr;
 +      uint32_t        aic_hi_addr;
 +      /* agp */
 +      uint32_t        agp_cntl;
 +      uint32_t        agp_command;
 +      uint32_t        agp_base;
 +      uint32_t        agp_base_2;
 +      uint32_t        bus_cntl;
 +      uint32_t        mc_agp_location;
 +      /* cp */
 +      uint32_t        cp_rb_cntl;
 +      uint32_t        cp_rb_base;
 +      uint32_t        cp_rb_rptr_addr;
 +      uint32_t        cp_rb_wptr;
 +      uint32_t        cp_rb_wptr_delay;
 +      uint32_t        scratch_umsk;
 +      uint32_t        scratch_addr;
 +      /* pcie */
 +      uint32_t        pcie_tx_gart_cntl;
 +      uint32_t        pcie_tx_gart_discard_rd_addr_lo;
 +      uint32_t        pcie_tx_gart_discard_rd_addr_hi;
 +      uint32_t        pcie_tx_gart_base;
 +      uint32_t        pcie_tx_gart_start_lo;
 +      uint32_t        pcie_tx_gart_start_hi;
 +      uint32_t        pcie_tx_gart_end_lo;
 +      uint32_t        pcie_tx_gart_end_hi;
 +      /* surface */
 +      uint32_t        surface_cntl;
 +      uint32_t        surface0_info;
 +      uint32_t        surface0_lower_bound;
 +      uint32_t        surface0_upper_bound;
 +      uint32_t        surface1_info;
 +      uint32_t        surface1_lower_bound;
 +      uint32_t        surface1_upper_bound;
 +      uint32_t        surface2_info;
 +      uint32_t        surface2_lower_bound;
 +      uint32_t        surface2_upper_bound;
 +      uint32_t        surface3_info;
 +      uint32_t        surface3_lower_bound;
 +      uint32_t        surface3_upper_bound;
 +      uint32_t        surface4_info;
 +      uint32_t        surface4_lower_bound;
 +      uint32_t        surface4_upper_bound;
 +      uint32_t        surface5_info;
 +      uint32_t        surface5_lower_bound;
 +      uint32_t        surface5_upper_bound;
 +      uint32_t        surface6_info;
 +      uint32_t        surface6_lower_bound;
 +      uint32_t        surface6_upper_bound;
 +      uint32_t        surface7_info;
 +      uint32_t        surface7_lower_bound;
 +      uint32_t        surface7_upper_bound;
 +      /* crtc */
 +      uint32_t        crtc_gen_cntl;
 +      uint32_t        crtc_ext_cntl;
 +      uint32_t        crtc_h_total_disp;
 +      uint32_t        crtc_h_sync_strt_wid;
 +      uint32_t        crtc_v_total_disp;
 +      uint32_t        crtc_v_sync_strt_wid;
 +      uint32_t        crtc_offset;
 +      uint32_t        crtc_offset_cntl;
 +      uint32_t        crtc_pitch;
 +      uint32_t        crtc_more_cntl;
 +      uint32_t        crtc_tile_x0_y0;
 +      uint32_t        fp_h_sync_strt_wid;
 +      uint32_t        fp_v_sync_strt_wid;
 +      uint32_t        fp_crtc_h_total_disp;
 +      uint32_t        fp_crtc_v_total_disp;
 +      /* pll */
 +      uint32_t        clock_cntl_index;
 +      uint32_t        ppll_cntl;
 +      uint32_t        ppll_ref_div;
 +      uint32_t        ppll_div_0;
 +      uint32_t        ppll_div_1;
 +      uint32_t        ppll_div_2;
 +      uint32_t        ppll_div_3;
 +      uint32_t        vclk_ecp_cntl;
 +      uint32_t        htotal_cntl;
 +      /* dac */
 +      uint32_t        dac_cntl;
 +      uint32_t        dac_cntl2;
 +      uint32_t        dac_ext_cntl;
 +      uint32_t        disp_misc_cntl;
 +      uint32_t        dac_macro_cntl;
 +      uint32_t        disp_pwr_man;
 +      uint32_t        disp_merge_cntl;
 +      uint32_t        disp_output_cntl;
 +      uint32_t        disp2_merge_cntl;
 +      uint32_t        dac_embedded_sync_cntl;
 +      uint32_t        dac_broad_pulse;
 +      uint32_t        dac_skew_clks;
 +      uint32_t        dac_incr;
 +      uint32_t        dac_neg_sync_level;
 +      uint32_t        dac_pos_sync_level;
 +      uint32_t        dac_blank_level;
 +      uint32_t        dac_sync_equalization;
 +      uint32_t        tv_dac_cntl;
 +      uint32_t        tv_master_cntl;
 +};
 +
 +struct drm_radeon_private {
 +      /* driver family specific functions */
 +      int (*bus_finish)(struct drm_device *dev);
 +      int (*bus_init)(struct drm_device *dev);
 +      void (*bus_restore)(struct drm_device *dev, struct radeon_state *state);
 +      void (*bus_save)(struct drm_device *dev, struct radeon_state *state);
 +      struct drm_ttm_backend *(*create_ttm)(struct drm_device *dev);
 +      void (*irq_emit)(struct drm_device *dev);
 +      void (*flush_cache)(struct drm_device *dev);
 +      /* bus informations */
 +      void                        *bus;
 +      uint32_t                    bus_type;
 +      /* cp */
 +      uint32_t                    ring_buffer_size;
 +      uint32_t                    ring_rptr;
 +      uint32_t                    ring_wptr;
 +      uint32_t                    ring_mask;
 +      int                         ring_free;
 +      uint32_t                    ring_tail_mask;
 +      uint32_t                    write_back_area_size;
 +      struct drm_buffer_object    *ring_buffer_object;
 +      struct drm_bo_kmap_obj      ring_buffer_map;
 +      uint32_t                    *ring_buffer;
 +      uint32_t                    *write_back_area;
 +      const uint32_t              *microcode;
 +      /* card family */
 +      uint32_t                    usec_timeout;
 +      uint32_t                    family;
 +      struct radeon_ms_output     *outputs[RADEON_MAX_OUTPUTS];
 +      struct radeon_ms_connector  *connectors[RADEON_MAX_CONNECTORS];
 +      /* drm map (MMIO, FB) */
 +      struct drm_map              mmio;
 +      struct drm_map              vram;
 +      /* gpu address space */
 +      uint32_t                    gpu_vram_size;
 +      uint32_t                    gpu_vram_start;
 +      uint32_t                    gpu_vram_end;
 +      uint32_t                    gpu_gart_size;
 +      uint32_t                    gpu_gart_start;
 +      uint32_t                    gpu_gart_end;
 +      /* state of the card when module was loaded */
 +      struct radeon_state         load_state;
 +      /* state the driver wants */
 +      struct radeon_state         driver_state;
 +      /* last emitted fence */
 +      uint32_t                    fence_id_last;
 +      uint32_t                    fence_reg;
 +      /* when doing gpu stop we save here current state */
 +      uint32_t                    crtc_ext_cntl;
 +      uint32_t                    crtc_gen_cntl;
 +      uint32_t                    crtc2_gen_cntl;
 +      uint32_t                    ov0_scale_cntl;
 +      /* bool & type on the hw */
 +      uint8_t                     crtc1_dpms;
 +      uint8_t                     crtc2_dpms;
 +      uint8_t                     restore_state;
 +      uint8_t                     cp_ready;
 +      uint8_t                     bus_ready;
 +      uint8_t                     write_back;
 +      /* abstract asic specific structures */
 +      struct radeon_ms_rom        rom;
 +      struct radeon_ms_properties properties;
 +};
 +
 +
 +/* radeon_ms_bo.c */
 +int radeon_ms_bo_get_gpu_addr(struct drm_device *dev,
 +                            struct drm_bo_mem_reg *mem,
 +                            uint32_t *gpu_addr);
 +int radeon_ms_bo_move(struct drm_buffer_object * bo, int evict,
 +                    int no_wait, struct drm_bo_mem_reg * new_mem);
 +struct drm_ttm_backend *radeon_ms_create_ttm_backend(struct drm_device * dev);
++uint64_t radeon_ms_evict_flags(struct drm_buffer_object *bo);
 +int radeon_ms_init_mem_type(struct drm_device * dev, uint32_t type,
 +                          struct drm_mem_type_manager * man);
 +int radeon_ms_invalidate_caches(struct drm_device * dev, uint64_t flags);
 +void radeon_ms_ttm_flush(struct drm_ttm *ttm);
 +
 +/* radeon_ms_bus.c */
 +int radeon_ms_agp_finish(struct drm_device *dev);
 +int radeon_ms_agp_init(struct drm_device *dev);
 +void radeon_ms_agp_restore(struct drm_device *dev, struct radeon_state *state);
 +void radeon_ms_agp_save(struct drm_device *dev, struct radeon_state *state);
 +struct drm_ttm_backend *radeon_ms_pcie_create_ttm(struct drm_device *dev);
 +int radeon_ms_pcie_finish(struct drm_device *dev);
 +int radeon_ms_pcie_init(struct drm_device *dev);
 +void radeon_ms_pcie_restore(struct drm_device *dev, struct radeon_state *state);
 +void radeon_ms_pcie_save(struct drm_device *dev, struct radeon_state *state);
 +
 +/* radeon_ms_combios.c */
 +int radeon_ms_combios_get_properties(struct drm_device *dev);
 +int radeon_ms_connectors_from_combios(struct drm_device *dev);
 +int radeon_ms_outputs_from_combios(struct drm_device *dev);
 +
 +/* radeon_ms_compat.c */
 +long radeon_ms_compat_ioctl(struct file *filp, unsigned int cmd,
 +                          unsigned long arg);
 +
 +/* radeon_ms_cp.c */
 +int radeon_ms_cp_finish(struct drm_device *dev);
 +int radeon_ms_cp_init(struct drm_device *dev);
 +void radeon_ms_cp_restore(struct drm_device *dev, struct radeon_state *state);
 +void radeon_ms_cp_save(struct drm_device *dev, struct radeon_state *state);
 +void radeon_ms_cp_stop(struct drm_device *dev);
 +int radeon_ms_cp_wait(struct drm_device *dev, int n);
 +int radeon_ms_ring_emit(struct drm_device *dev, uint32_t *cmd, uint32_t count);
 +
 +/* radeon_ms_crtc.c */
 +int radeon_ms_crtc_create(struct drm_device *dev, int crtc);
 +void radeon_ms_crtc1_restore(struct drm_device *dev,
 +                           struct radeon_state *state);
 +void radeon_ms_crtc1_save(struct drm_device *dev, struct radeon_state *state);
 +
 +/* radeon_ms_dac.c */
 +int radeon_ms_dac1_initialize(struct radeon_ms_output *output);
 +enum drm_output_status radeon_ms_dac1_detect(struct radeon_ms_output *output);
 +void radeon_ms_dac1_dpms(struct radeon_ms_output *output, int mode);
 +int radeon_ms_dac1_get_modes(struct radeon_ms_output *output);
 +bool radeon_ms_dac1_mode_fixup(struct radeon_ms_output *output,
 +              struct drm_display_mode *mode,
 +              struct drm_display_mode *adjusted_mode);
 +int radeon_ms_dac1_mode_set(struct radeon_ms_output *output,
 +              struct drm_display_mode *mode,
 +              struct drm_display_mode *adjusted_mode);
 +void radeon_ms_dac1_restore(struct radeon_ms_output *output,
 +              struct radeon_state *state);
 +void radeon_ms_dac1_save(struct radeon_ms_output *output,
 +              struct radeon_state *state);
 +int radeon_ms_dac2_initialize(struct radeon_ms_output *output);
 +enum drm_output_status radeon_ms_dac2_detect(struct radeon_ms_output *output);
 +void radeon_ms_dac2_dpms(struct radeon_ms_output *output, int mode);
 +int radeon_ms_dac2_get_modes(struct radeon_ms_output *output);
 +bool radeon_ms_dac2_mode_fixup(struct radeon_ms_output *output,
 +              struct drm_display_mode *mode,
 +              struct drm_display_mode *adjusted_mode);
 +int radeon_ms_dac2_mode_set(struct radeon_ms_output *output,
 +              struct drm_display_mode *mode,
 +              struct drm_display_mode *adjusted_mode);
 +void radeon_ms_dac2_restore(struct radeon_ms_output *output,
 +              struct radeon_state *state);
 +void radeon_ms_dac2_save(struct radeon_ms_output *output,
 +              struct radeon_state *state);
 +
 +/* radeon_ms_drm.c */
 +int radeon_ms_driver_dma_ioctl(struct drm_device *dev, void *data,
 +                             struct drm_file *file_priv);
 +void radeon_ms_driver_lastclose(struct drm_device * dev);
 +int radeon_ms_driver_load(struct drm_device *dev, unsigned long flags);
 +int radeon_ms_driver_open(struct drm_device * dev, struct drm_file *file_priv);
 +int radeon_ms_driver_unload(struct drm_device *dev);
 +
 +/* radeon_ms_exec.c */
 +int radeon_ms_execbuffer(struct drm_device *dev, void *data,
 +                       struct drm_file *file_priv);
 +
 +/* radeon_ms_family.c */
 +int radeon_ms_family_init(struct drm_device *dev);
 +
 +/* radeon_ms_fence.c */
 +int radeon_ms_fence_emit_sequence(struct drm_device *dev, uint32_t class,
 +                                uint32_t flags, uint32_t *sequence,
 +                                uint32_t *native_type);
 +void radeon_ms_fence_handler(struct drm_device * dev);
 +int radeon_ms_fence_has_irq(struct drm_device *dev, uint32_t class,
 +                          uint32_t flags);
 +int radeon_ms_fence_types(struct drm_buffer_object *bo,
 +                        uint32_t * class, uint32_t * type);
 +void radeon_ms_poke_flush(struct drm_device * dev, uint32_t class);
 +
 +/* radeon_ms_fb.c */
 +int radeonfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
 +int radeonfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
 +
 +/* radeon_ms_gpu.c */
 +int radeon_ms_gpu_initialize(struct drm_device *dev);
 +void radeon_ms_gpu_dpms(struct drm_device *dev);
 +void radeon_ms_gpu_flush(struct drm_device *dev);
 +void radeon_ms_gpu_restore(struct drm_device *dev, struct radeon_state *state);
 +void radeon_ms_gpu_save(struct drm_device *dev, struct radeon_state *state);
 +int radeon_ms_wait_for_idle(struct drm_device *dev);
 +
 +/* radeon_ms_i2c.c */
 +void radeon_ms_i2c_destroy(struct radeon_ms_i2c *i2c);
 +struct radeon_ms_i2c *radeon_ms_i2c_create(struct drm_device *dev,
 +                                         const uint32_t reg,
 +                                         const char *name);
 +
 +/* radeon_ms_irq.c */
 +void radeon_ms_irq_emit(struct drm_device *dev);
 +irqreturn_t radeon_ms_irq_handler(DRM_IRQ_ARGS);
 +void radeon_ms_irq_preinstall(struct drm_device * dev);
 +void radeon_ms_irq_postinstall(struct drm_device * dev);
 +int radeon_ms_irq_init(struct drm_device *dev);
 +void radeon_ms_irq_restore(struct drm_device *dev, struct radeon_state *state);
 +void radeon_ms_irq_save(struct drm_device *dev, struct radeon_state *state);
 +void radeon_ms_irq_uninstall(struct drm_device * dev);
 +
 +/* radeon_ms_output.c */
 +void radeon_ms_connectors_destroy(struct drm_device *dev);
 +int radeon_ms_connectors_from_properties(struct drm_device *dev);
 +int radeon_ms_connectors_from_rom(struct drm_device *dev);
 +void radeon_ms_outputs_destroy(struct drm_device *dev);
 +int radeon_ms_outputs_from_properties(struct drm_device *dev);
 +int radeon_ms_outputs_from_rom(struct drm_device *dev);
 +void radeon_ms_outputs_restore(struct drm_device *dev,
 +              struct radeon_state *state);
 +void radeon_ms_outputs_save(struct drm_device *dev, struct radeon_state *state);
 +
 +/* radeon_ms_properties.c */
 +int radeon_ms_properties_init(struct drm_device *dev);
 +
 +/* radeon_ms_rom.c */
 +int radeon_ms_rom_get_properties(struct drm_device *dev);
 +int radeon_ms_rom_init(struct drm_device *dev);
 +
 +/* radeon_ms_state.c */
 +void radeon_ms_state_save(struct drm_device *dev, struct radeon_state *state);
 +void radeon_ms_state_restore(struct drm_device *dev,
 +                           struct radeon_state *state);
 +
 +
 +/* packect stuff **************************************************************/
 +#define RADEON_CP_PACKET0                               0x00000000
 +#define CP_PACKET0(reg, n)                                            \
 +      (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
 +#define CP_PACKET3_CNTL_BITBLT_MULTI                    0xC0009B00
 +#    define GMC_SRC_PITCH_OFFSET_CNTL                       (1    <<  0)
 +#    define GMC_DST_PITCH_OFFSET_CNTL                       (1    <<  1)
 +#    define GMC_BRUSH_NONE                                  (15   <<  4)
 +#    define GMC_SRC_DATATYPE_COLOR                          (3    << 12)
 +#    define ROP3_S                                          0x00cc0000
 +#    define DP_SRC_SOURCE_MEMORY                            (2    << 24)
 +#    define GMC_CLR_CMP_CNTL_DIS                            (1    << 28)
 +#    define GMC_WR_MSK_DIS                                  (1    << 30)
 +
 +/* helper macro & functions ***************************************************/
 +#define REG_S(rn, bn, v)    (((v) << rn##__##bn##__SHIFT) & rn##__##bn##__MASK)
 +#define REG_G(rn, bn, v)    (((v) & rn##__##bn##__MASK) >> rn##__##bn##__SHIFT)
 +#define MMIO_R(rid)         mmio_read(dev_priv, rid)
 +#define MMIO_W(rid, v)      mmio_write(dev_priv, rid, v)
 +#define PCIE_R(rid)         pcie_read(dev_priv, rid)
 +#define PCIE_W(rid, v)      pcie_write(dev_priv, rid, v)
 +#define PPLL_R(rid)         pll_read(dev_priv, rid)
 +#define PPLL_W(rid, v)      pll_write(dev_priv, rid, v)
 +
 +static __inline__ uint32_t mmio_read(struct drm_radeon_private *dev_priv,
 +                                   uint32_t offset)
 +{
 +      return DRM_READ32(&dev_priv->mmio, offset);
 +}
 +
 +
 +static __inline__ void mmio_write(struct drm_radeon_private *dev_priv,
 +                                uint32_t offset, uint32_t v)
 +{
 +      DRM_WRITE32(&dev_priv->mmio, offset, v);
 +}
 +
 +static __inline__ uint32_t pcie_read(struct drm_radeon_private *dev_priv,
 +                                   uint32_t offset)
 +{
 +      MMIO_W(PCIE_INDEX, REG_S(PCIE_INDEX, PCIE_INDEX, offset));
 +      return MMIO_R(PCIE_DATA);
 +}
 +
 +static __inline__ void pcie_write(struct drm_radeon_private *dev_priv,
 +                                uint32_t offset, uint32_t v)
 +{
 +      MMIO_W(PCIE_INDEX, REG_S(PCIE_INDEX, PCIE_INDEX, offset));
 +      MMIO_W(PCIE_DATA, v);
 +}
 +
 +static __inline__ void pll_index_errata(struct drm_radeon_private *dev_priv)
 +{
 +      uint32_t tmp, save;
 +
 +      /* This workaround is necessary on rv200 and RS200 or PLL
 +       * reads may return garbage (among others...)
 +       */
 +      if (dev_priv->properties.pll_dummy_reads) {
 +              tmp = MMIO_R(CLOCK_CNTL_DATA);
 +              tmp = MMIO_R(CRTC_GEN_CNTL);
 +      }
 +      /* This function is required to workaround a hardware bug in some (all?)
 +       * revisions of the R300.  This workaround should be called after every
 +       * CLOCK_CNTL_INDEX register access.  If not, register reads afterward
 +       * may not be correct.
 +       */
 +      if (dev_priv->properties.pll_r300_errata) {
 +              tmp = save = MMIO_R(CLOCK_CNTL_INDEX);
 +              tmp = tmp & ~CLOCK_CNTL_INDEX__PLL_ADDR__MASK;
 +              tmp = tmp & ~CLOCK_CNTL_INDEX__PLL_WR_EN;
 +              MMIO_W(CLOCK_CNTL_INDEX, tmp);
 +              tmp = MMIO_R(CLOCK_CNTL_DATA);
 +              MMIO_W(CLOCK_CNTL_INDEX, save);
 +      }
 +}
 +
 +static __inline__ void pll_data_errata(struct drm_radeon_private *dev_priv)
 +{
 +      /* This workarounds is necessary on RV100, RS100 and RS200 chips
 +       * or the chip could hang on a subsequent access
 +       */
 +      if (dev_priv->properties.pll_delay) {
 +              /* we can't deal with posted writes here ... */
 +              udelay(5000);
 +      }
 +}
 +
 +static __inline__ uint32_t pll_read(struct drm_radeon_private *dev_priv,
 +                                  uint32_t offset)
 +{
 +      uint32_t clock_cntl_index = dev_priv->driver_state.clock_cntl_index;
 +      uint32_t data;
 +
 +      clock_cntl_index &= ~CLOCK_CNTL_INDEX__PLL_ADDR__MASK;
 +      clock_cntl_index |= REG_S(CLOCK_CNTL_INDEX, PLL_ADDR, offset);
 +      MMIO_W(CLOCK_CNTL_INDEX, clock_cntl_index);
 +      pll_index_errata(dev_priv);
 +      data = MMIO_R(CLOCK_CNTL_DATA);
 +      pll_data_errata(dev_priv);
 +      return data;
 +}
 +
 +static __inline__ void pll_write(struct drm_radeon_private *dev_priv,
 +                               uint32_t offset, uint32_t value)
 +{
 +      uint32_t clock_cntl_index = dev_priv->driver_state.clock_cntl_index;
 +
 +      clock_cntl_index &= ~CLOCK_CNTL_INDEX__PLL_ADDR__MASK;
 +      clock_cntl_index |= REG_S(CLOCK_CNTL_INDEX, PLL_ADDR, offset);
 +      clock_cntl_index |= CLOCK_CNTL_INDEX__PLL_WR_EN;
 +      MMIO_W(CLOCK_CNTL_INDEX, clock_cntl_index);
 +      pll_index_errata(dev_priv);
 +      MMIO_W(CLOCK_CNTL_DATA, value);
 +      pll_data_errata(dev_priv);
 +}
 +
 +#endif
index cedad68,0000000..6d9a97c
mode 100644,000000..100644
--- /dev/null
@@@ -1,311 -1,0 +1,311 @@@
-       tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
 +/*
 + * Copyright 2007 Dave Airlie
 + * Copyright 2007 Jérôme Glisse
 + * All Rights Reserved.
 + * 
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the
 + * "Software"), to deal in the Software without restriction, including
 + * without limitation the rights to use, copy, modify, merge, publish,
 + * distribute, sub license, and/or sell copies of the Software, and to
 + * permit persons to whom the Software is furnished to do so, subject to
 + * the following conditions:
 + * 
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
 + * USE OR OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * The above copyright notice and this permission notice (including the
 + * next paragraph) shall be included in all copies or substantial portions
 + * of the Software.
 + */
 +/*
 + * Authors:
 + *    Dave Airlie <airlied@linux.ie>
 + *    Jerome Glisse <glisse@freedesktop.org>
 + */
 +#include "drmP.h"
 +#include "drm.h"
 +
 +#include "radeon_ms.h"
 +
 +void radeon_ms_bo_copy_blit(struct drm_device *dev,
 +                          uint32_t src_offset,
 +                          uint32_t dst_offset,
 +                          uint32_t pages)
 +{
 +      struct drm_radeon_private *dev_priv = dev->dev_private;
 +      uint32_t num_pages, stride, c;
 +      uint32_t offset_inc = 0;
 +      uint32_t cmd[7];
 +
 +      if (!dev_priv) {
 +              return;
 +      }
 +
 +      /* radeon limited to 16320=255*64 bytes per row so copy at
 +       * most 2 pages */
 +      num_pages = 2;
 +      stride = ((num_pages * PAGE_SIZE) / 64) & 0xff;
 +      while(pages > 0) {
 +              if (num_pages > pages) {
 +                      num_pages = pages;
 +                      stride = ((num_pages * PAGE_SIZE) / 64) & 0xff;
 +              }
 +              c = pages / num_pages;
 +              if (c >= 8192) {
 +                      c = 8191;
 +              }
 +              cmd[0] = CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16);
 +              cmd[1] = GMC_SRC_PITCH_OFFSET_CNTL |
 +                       GMC_DST_PITCH_OFFSET_CNTL |
 +                       GMC_BRUSH_NONE |
 +                       (0x6 << 8) |
 +                       GMC_SRC_DATATYPE_COLOR |
 +                       ROP3_S |
 +                       DP_SRC_SOURCE_MEMORY |
 +                       GMC_CLR_CMP_CNTL_DIS |
 +                       GMC_WR_MSK_DIS;
 +              cmd[2] = (stride << 22) | (src_offset >> 10);
 +              cmd[3] = (stride << 22) | (dst_offset >> 10);
 +              cmd[4] = (0 << 16) | 0;
 +              cmd[5] = (0 << 16) | 0;
 +              cmd[6] = ((stride * 16) << 16) | c;
 +              radeon_ms_ring_emit(dev, cmd, 7);
 +              offset_inc = num_pages * c * PAGE_SIZE;
 +              src_offset += offset_inc;
 +              dst_offset += offset_inc;
 +              pages -= num_pages * c;
 +      }
 +      /* wait for 2d engine to go busy so wait_until stall */
 +      for (c = 0; c < dev_priv->usec_timeout; c++) {
 +              uint32_t status = MMIO_R(RBBM_STATUS);
 +              if ((RBBM_STATUS__E2_BUSY & status) ||
 +                  (RBBM_STATUS__CBA2D_BUSY & status)) {
 +                      DRM_INFO("[radeon_ms] RBBM_STATUS 0x%08X\n", status);
 +                      break;
 +              }
 +              DRM_UDELAY(1);
 +      }
 +      /* Sync everything up */
 +      cmd[0] = CP_PACKET0(WAIT_UNTIL, 0);
 +      cmd[1] = WAIT_UNTIL__WAIT_2D_IDLECLEAN |
 +               WAIT_UNTIL__WAIT_HOST_IDLECLEAN;
 +      radeon_ms_ring_emit(dev, cmd, 2);
 +      return;
 +}
 +
 +static int radeon_ms_bo_move_blit(struct drm_buffer_object *bo,
 +                                int evict, int no_wait,
 +                                struct drm_bo_mem_reg *new_mem)
 +{
 +      struct drm_device *dev = bo->dev;
 +      struct drm_bo_mem_reg *old_mem = &bo->mem;
 +      uint32_t gpu_src_addr;
 +      uint32_t gpu_dst_addr;
 +      int ret;
 +
 +      ret = radeon_ms_bo_get_gpu_addr(dev, old_mem, &gpu_src_addr);
 +      if (ret) {
 +              return ret;
 +      }
 +      ret = radeon_ms_bo_get_gpu_addr(dev, new_mem, &gpu_dst_addr);
 +      if (ret) {
 +              return ret;
 +      }
 +
 +      radeon_ms_bo_copy_blit(bo->dev,
 +                             gpu_src_addr,
 +                             gpu_dst_addr,
 +                             new_mem->num_pages);
 +      
 +      ret = drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
 +                                       DRM_FENCE_TYPE_EXE |
 +                                       DRM_RADEON_FENCE_TYPE_RW,
 +                                       DRM_RADEON_FENCE_FLAG_FLUSHED,
 +                                       new_mem);
 +      return ret;
 +}
 +
 +static int radeon_ms_bo_move_flip(struct drm_buffer_object *bo,
 +                                int evict, int no_wait,
 +                                struct drm_bo_mem_reg *new_mem)
 +{
 +      struct drm_device *dev = bo->dev;
 +      struct drm_bo_mem_reg tmp_mem;
 +      int ret;
 +
 +      tmp_mem = *new_mem;
 +      tmp_mem.mm_node = NULL;
-       ret = drm_bind_ttm(bo->ttm, &tmp_mem);
++      tmp_mem.flags = DRM_BO_FLAG_MEM_TT |
 +                     DRM_BO_FLAG_CACHED |
 +                     DRM_BO_FLAG_FORCE_CACHING;
 +      ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
 +      if (ret) {
 +              return ret;
 +      }
 +
- uint32_t radeon_ms_evict_mask(struct drm_buffer_object *bo)
++      ret = drm_ttm_bind(bo->ttm, &tmp_mem);
 +      if (ret) {
 +              goto out_cleanup;
 +      }
 +      ret = radeon_ms_bo_move_blit(bo, 1, no_wait, &tmp_mem);
 +      if (ret) {
 +              goto out_cleanup;
 +      }
 +      ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
 +out_cleanup:
 +      if (tmp_mem.mm_node) {
 +              mutex_lock(&dev->struct_mutex);
 +              if (tmp_mem.mm_node != bo->pinned_node)
 +                      drm_mm_put_block(tmp_mem.mm_node);
 +              tmp_mem.mm_node = NULL;
 +              mutex_unlock(&dev->struct_mutex);
 +      }
 +      return ret;
 +}
 +
 +int radeon_ms_bo_get_gpu_addr(struct drm_device *dev,
 +                            struct drm_bo_mem_reg *mem,
 +                            uint32_t *gpu_addr)
 +{
 +      struct drm_radeon_private *dev_priv = dev->dev_private;
 +
 +      *gpu_addr = mem->mm_node->start << PAGE_SHIFT;
 +      switch (mem->flags & DRM_BO_MASK_MEM) {
 +      case DRM_BO_FLAG_MEM_TT:
 +              *gpu_addr +=  dev_priv->gpu_gart_start;
 +              DRM_INFO("[radeon_ms] GPU TT: 0x%08X\n", *gpu_addr);
 +              break;
 +      case DRM_BO_FLAG_MEM_VRAM:
 +              *gpu_addr +=  dev_priv->gpu_vram_start;
 +              DRM_INFO("[radeon_ms] GPU VRAM: 0x%08X\n", *gpu_addr);
 +              break;
 +      default:
 +              DRM_ERROR("[radeon_ms] memory not accessible by GPU\n");
 +              return -EINVAL;
 +      }
 +      return 0;
 +}
 +
 +int radeon_ms_bo_move(struct drm_buffer_object *bo, int evict,
 +                 int no_wait, struct drm_bo_mem_reg *new_mem)
 +{
 +      struct drm_bo_mem_reg *old_mem = &bo->mem;
 +      if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
 +              return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 +      } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
 +              if (radeon_ms_bo_move_flip(bo, evict, no_wait, new_mem))
 +                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 +      } else {
 +              if (radeon_ms_bo_move_blit(bo, evict, no_wait, new_mem))
 +                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 +      }
 +      return 0;
 +}
 +
 +struct drm_ttm_backend *radeon_ms_create_ttm_backend(struct drm_device * dev)
 +{
 +      struct drm_radeon_private *dev_priv = dev->dev_private;
 +
 +      if (dev_priv && dev_priv->create_ttm)
 +              return dev_priv->create_ttm(dev);
 +      return NULL;
 +}
 +
++uint64_t radeon_ms_evict_flags(struct drm_buffer_object *bo)
 +{
 +      switch (bo->mem.mem_type) {
 +      case DRM_BO_MEM_LOCAL:
 +      case DRM_BO_MEM_TT:
 +              return DRM_BO_FLAG_MEM_LOCAL;
 +      case DRM_BO_MEM_VRAM:
 +              if (bo->mem.num_pages > 128)
 +                      return DRM_BO_MEM_TT;
 +              else
 +                      return DRM_BO_MEM_LOCAL;
 +      default:
 +              return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
 +      }
 +}
 +
 +int radeon_ms_init_mem_type(struct drm_device * dev, uint32_t type,
 +                          struct drm_mem_type_manager * man)
 +{
 +      struct drm_radeon_private *dev_priv = dev->dev_private;
 +
 +      switch (type) {
 +      case DRM_BO_MEM_LOCAL:
 +              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 +                  _DRM_FLAG_MEMTYPE_CACHED;
 +              man->drm_bus_maptype = 0;
 +              break;
 +      case DRM_BO_MEM_VRAM:
 +              man->flags =  _DRM_FLAG_MEMTYPE_FIXED |
 +                            _DRM_FLAG_MEMTYPE_MAPPABLE |
 +                            _DRM_FLAG_NEEDS_IOREMAP;
 +              man->io_addr = NULL;
 +              man->drm_bus_maptype = _DRM_FRAME_BUFFER;
 +              man->io_offset = dev_priv->vram.offset;
 +              man->io_size = dev_priv->vram.size;
 +              break;
 +      case DRM_BO_MEM_TT:
 +              if (!dev_priv->bus_ready) {
 +                      DRM_ERROR("Bus isn't initialized while "
 +                                "intializing TT memory type\n");
 +                      return -EINVAL;
 +              }
 +              switch(dev_priv->bus_type) {
 +              case RADEON_AGP:
 +                      if (!(drm_core_has_AGP(dev) && dev->agp)) {
 +                              DRM_ERROR("AGP is not enabled for memory "
 +                                        "type %u\n", (unsigned)type);
 +                              return -EINVAL;
 +                      }
 +                      man->io_offset = dev->agp->agp_info.aper_base;
 +                      man->io_size = dev->agp->agp_info.aper_size *
 +                                     1024 * 1024;
 +                      man->io_addr = NULL;
 +                      man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 +                                   _DRM_FLAG_MEMTYPE_CSELECT |
 +                                   _DRM_FLAG_NEEDS_IOREMAP;
 +                      man->drm_bus_maptype = _DRM_AGP;
 +                      man->gpu_offset = 0;
 +                      break;
 +              default:
 +                      man->io_offset = dev_priv->gpu_gart_start;
 +                      man->io_size = dev_priv->gpu_gart_size;
 +                      man->io_addr = NULL;
 +                      man->flags = _DRM_FLAG_MEMTYPE_CSELECT |
 +                                   _DRM_FLAG_MEMTYPE_MAPPABLE |
 +                                   _DRM_FLAG_MEMTYPE_CMA;
 +                      man->drm_bus_maptype = _DRM_SCATTER_GATHER;
 +                      break;
 +              }
 +              break;
 +      default:
 +              DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
 +              return -EINVAL;
 +      }
 +      return 0;
 +}
 +
 +int radeon_ms_invalidate_caches(struct drm_device * dev, uint64_t flags)
 +{
 +      struct drm_radeon_private *dev_priv = dev->dev_private;
 +
 +      dev_priv->flush_cache(dev);
 +      return 0;
 +}
 +
 +void radeon_ms_ttm_flush(struct drm_ttm *ttm)
 +{
 +      if (!ttm)
 +              return;
 +
 +      DRM_MEMORYBARRIER();
 +}
index 91ca4a3,0000000..bf76b45
mode 100644,000000..100644
--- /dev/null
@@@ -1,318 -1,0 +1,318 @@@
-       .evict_mask = radeon_ms_evict_mask,
 +/*
 + * Copyright 2007 Jérôme Glisse
 + * Copyright 2007 Alex Deucher
 + * Copyright 2007 Dave Airlie
 + * All Rights Reserved.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice (including the next
 + * paragraph) shall be included in all copies or substantial portions of the
 + * Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 + * DEALINGS IN THE SOFTWARE.
 + */
 +/*
 + * Authors:
 + *    Jerome Glisse <glisse@freedesktop.org>
 + */
 +#include "drm_pciids.h"
 +#include "radeon_ms.h"
 +
 +
 +static uint32_t radeon_ms_mem_prios[] = {
 +      DRM_BO_MEM_VRAM,
 +      DRM_BO_MEM_TT,
 +      DRM_BO_MEM_LOCAL,
 +};
 +
 +static uint32_t radeon_ms_busy_prios[] = {
 +      DRM_BO_MEM_TT,
 +      DRM_BO_MEM_VRAM,
 +      DRM_BO_MEM_LOCAL,
 +};
 +
 +struct drm_fence_driver radeon_ms_fence_driver = {
 +      .num_classes = 1,
 +      .wrap_diff = (1 << 30),
 +      .flush_diff = (1 << 29),
 +      .sequence_mask = 0xffffffffU,
 +      .lazy_capable = 1,
 +      .emit = radeon_ms_fence_emit_sequence,
 +      .poke_flush = radeon_ms_poke_flush,
 +      .has_irq = radeon_ms_fence_has_irq,
 +};
 +
 +struct drm_bo_driver radeon_ms_bo_driver = {
 +      .mem_type_prio = radeon_ms_mem_prios,
 +      .mem_busy_prio = radeon_ms_busy_prios,
 +      .num_mem_type_prio = sizeof(radeon_ms_mem_prios)/sizeof(uint32_t),
 +      .num_mem_busy_prio = sizeof(radeon_ms_busy_prios)/sizeof(uint32_t),
 +      .create_ttm_backend_entry = radeon_ms_create_ttm_backend,
 +      .fence_type = radeon_ms_fence_types,
 +      .invalidate_caches = radeon_ms_invalidate_caches,
 +      .init_mem_type = radeon_ms_init_mem_type,
++      .evict_flags = radeon_ms_evict_flags,
 +      .move = radeon_ms_bo_move,
 +      .ttm_cache_flush = radeon_ms_ttm_flush,
 +};
 +
 +struct drm_ioctl_desc radeon_ms_ioctls[] = {
 +      DRM_IOCTL_DEF(DRM_RADEON_EXECBUFFER, radeon_ms_execbuffer, DRM_AUTH),
 +};
 +int radeon_ms_num_ioctls = DRM_ARRAY_SIZE(radeon_ms_ioctls);
 +
 +int radeon_ms_driver_dma_ioctl(struct drm_device *dev, void *data,
 +                             struct drm_file *file_priv)
 +{
 +      struct drm_device_dma *dma = dev->dma;
 +      struct drm_dma *d = data;
 +
 +      LOCK_TEST_WITH_RETURN(dev, file_priv);
 +
 +      /* Please don't send us buffers.
 +       */
 +      if (d->send_count != 0) {
 +              DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
 +                        DRM_CURRENTPID, d->send_count);
 +              return -EINVAL;
 +      }
 +
 +      /* Don't ask us buffer neither :)
 +       */
 +      DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
 +                DRM_CURRENTPID, d->request_count, dma->buf_count);
 +      return -EINVAL;
 +}
 +
 +void radeon_ms_driver_lastclose(struct drm_device * dev)
 +{
 +}
 +
 +int radeon_ms_driver_load(struct drm_device *dev, unsigned long flags)
 +{
 +      struct drm_radeon_private *dev_priv;
 +      int ret = 0;
 +
 +      DRM_INFO("[radeon_ms] loading\n");
 +      /* allocate and clear device private structure */
 +      dev_priv = drm_alloc(sizeof(struct drm_radeon_private), DRM_MEM_DRIVER);
 +      if (dev_priv == NULL)
 +              return -ENOMEM;
 +      memset(dev_priv, 0, sizeof(struct drm_radeon_private));
 +      dev->dev_private = (void *)dev_priv;
 +
 +      /* initialize modesetting structure (must be done here) */
 +      drm_mode_config_init(dev);
 +
 +      /* flags correspond to chipset family */
 +      dev_priv->usec_timeout = 100;
 +      dev_priv->family = flags & 0xffffU;
 +      dev_priv->bus_type = flags & 0xff0000U;
 +      /* initialize family functions */
 +      ret = radeon_ms_family_init(dev);
 +      if (ret != 0) {
 +              radeon_ms_driver_unload(dev);
 +              return ret;
 +      }
 +
 +      /* we don't want userspace to be able to map this so don't use
 +       * drm_addmap */
 +      dev_priv->mmio.offset = drm_get_resource_start(dev, 2);
 +      dev_priv->mmio.size = drm_get_resource_len(dev, 2);
 +      dev_priv->mmio.type = _DRM_REGISTERS;
 +      dev_priv->mmio.flags = _DRM_RESTRICTED;
 +      drm_core_ioremap(&dev_priv->mmio, dev); 
 +      /* map vram FIXME: IGP likely don't have any of this */
 +      dev_priv->vram.offset = drm_get_resource_start(dev, 0);
 +      dev_priv->vram.size = drm_get_resource_len(dev, 0);
 +      dev_priv->vram.type = _DRM_FRAME_BUFFER;
 +      dev_priv->vram.flags = _DRM_RESTRICTED;
 +      drm_core_ioremap(&dev_priv->vram, dev);
 +
 +      /* save radeon initial state which will be restored upon module
 +       * exit */
 +      radeon_ms_state_save(dev, &dev_priv->load_state);
 +      dev_priv->restore_state = 1;
 +      memcpy(&dev_priv->driver_state, &dev_priv->load_state,
 +             sizeof(struct radeon_state));
 +
 +      /* initialize irq */
 +      ret = radeon_ms_irq_init(dev);
 +      if (ret != 0) {
 +              radeon_ms_driver_unload(dev);
 +              return ret;
 +      }
 +
 +      /* init bo driver */
 +      dev_priv->fence_id_last = 1;
 +      dev_priv->fence_reg = SCRATCH_REG2;
 +      drm_bo_driver_init(dev);
 +      /* initialize vram */
 +      ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, dev_priv->vram.size);
 +      if (ret != 0) {
 +              radeon_ms_driver_unload(dev);
 +              return ret;
 +      }
 +
 +      /* initialize gpu address space (only after) VRAM initialization */
 +      ret = radeon_ms_gpu_initialize(dev);
 +      if (ret != 0) {
 +              radeon_ms_driver_unload(dev);
 +              return ret;
 +      }
 +      radeon_ms_gpu_restore(dev, &dev_priv->driver_state);
 +
 +      /* initialize ttm */
 +      ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
 +                           dev_priv->gpu_gart_size / RADEON_PAGE_SIZE);
 +      if (ret != 0) {
 +              radeon_ms_driver_unload(dev);
 +              return ret;
 +      }
 +
 +      /* initialize ring buffer */
 +      /* set ring size to 4Mo FIXME: should make a parameter for this */
 +      dev_priv->write_back_area_size = 4 * 1024;
 +      dev_priv->ring_buffer_size = 4 * 1024 * 1024;
 +      ret = radeon_ms_cp_init(dev);
 +      if (ret != 0) {
 +              radeon_ms_driver_unload(dev);
 +              return ret;
 +      }
 +
 +      /* initialize modesetting */
 +      dev->mode_config.min_width = 0;
 +      dev->mode_config.min_height = 0;
 +      dev->mode_config.max_width = 4096;
 +      dev->mode_config.max_height = 4096;
 +      dev->mode_config.fb_base = dev_priv->vram.offset;
 +      ret = radeon_ms_crtc_create(dev, 1);
 +      if (ret != 0) {
 +              radeon_ms_driver_unload(dev);
 +              return ret;
 +      }
 +      ret = radeon_ms_outputs_from_rom(dev);
 +      if (ret < 0) {
 +              radeon_ms_driver_unload(dev);
 +              return ret;
 +      } else if (!ret) {
 +              ret = radeon_ms_outputs_from_properties(dev);
 +              if (ret < 0) {
 +                      radeon_ms_driver_unload(dev);
 +                      return ret;
 +              } else if (ret == 0) {
 +                      DRM_INFO("[radeon_ms] no outputs !\n");
 +              }
 +      } else {
 +              DRM_INFO("[radeon_ms] added %d outputs from rom.\n", ret);
 +      }
 +      ret = radeon_ms_connectors_from_rom(dev);
 +      if (ret < 0) {
 +              radeon_ms_driver_unload(dev);
 +              return ret;
 +      } else if (!ret) {
 +              ret = radeon_ms_connectors_from_properties(dev);
 +              if (ret < 0) {
 +                      radeon_ms_driver_unload(dev);
 +                      return ret;
 +              } else if (!ret) {
 +                      DRM_INFO("[radeon_ms] no connectors !\n");
 +              }
 +      } else {
 +              DRM_INFO("[radeon_ms] added %d connectors from rom.\n", ret);
 +      }
 +      radeon_ms_outputs_save(dev, &dev_priv->load_state);
 +      drm_initial_config(dev, false);
 +
 +      ret = drm_irq_install(dev);
 +      if (ret != 0) {
 +              radeon_ms_driver_unload(dev);
 +              return ret;
 +      }
 +
 +      DRM_INFO("[radeon_ms] successfull initialization\n");
 +      return 0;
 +}
 +
 +/* Per-open-file hook (drm_driver .open).  radeon_ms keeps no
 + * per-file-descriptor state, so this is a deliberate no-op that
 + * always reports success. */
 +int radeon_ms_driver_open(struct drm_device * dev, struct drm_file *file_priv)
 +{
 +      return 0;
 +}
 +
 +
 +/*
 + * radeon_ms_driver_unload - tear down everything radeon_ms_driver_load()
 + * set up, in roughly reverse order: modesetting, CP engine, IRQ, TTM
 + * memory managers (TT then VRAM), the bus backend, saved register state,
 + * and finally the MMIO/VRAM mappings and dev_priv itself.
 + *
 + * Also used as the error-unwind path of radeon_ms_driver_load(), so each
 + * step must tolerate partially-initialized state.  Always returns 0.
 + */
 +int radeon_ms_driver_unload(struct drm_device *dev)
 +{
 +      struct drm_radeon_private *dev_priv = dev->dev_private;
 +
 +      /* Load failed before dev_priv was allocated: nothing to undo. */
 +      if (dev_priv == NULL) {
 +              return 0;
 +      }
 +
 +      /* cleanup modesetting */
 +      drm_mode_config_cleanup(dev);
 +      DRM_INFO("[radeon_ms] modesetting clean\n");
 +      /* Undo the radeon_ms_outputs_save() done at the end of load. */
 +      radeon_ms_outputs_restore(dev, &dev_priv->load_state);
 +      radeon_ms_connectors_destroy(dev);
 +      radeon_ms_outputs_destroy(dev);
 +      
 +      /* shutdown cp engine */
 +      radeon_ms_cp_finish(dev);
 +      DRM_INFO("[radeon_ms] cp clean\n");
 +
 +      /* NOTE(review): this also runs on load-error paths taken before
 +       * drm_irq_install(); presumably drm_irq_uninstall() tolerates an
 +       * IRQ that was never installed -- confirm. */
 +      drm_irq_uninstall(dev);
 +      DRM_INFO("[radeon_ms] irq uninstalled\n");
 +
 +      DRM_INFO("[radeon_ms] unloading\n");
 +      /* clean ttm memory manager */
 +      mutex_lock(&dev->struct_mutex);
 +      if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT)) {
 +              /* Best effort: log and keep tearing down the rest. */
 +              DRM_ERROR("TT memory manager not clean. Delaying takedown\n");
 +      }
 +      mutex_unlock(&dev->struct_mutex);
 +      DRM_INFO("[radeon_ms] TT memory clean\n");
 +      /* finish */
 +      if (dev_priv->bus_finish) {
 +              dev_priv->bus_finish(dev);
 +      }
 +      DRM_INFO("[radeon_ms] bus down\n");
 +      /* clean vram memory manager */
 +      mutex_lock(&dev->struct_mutex);
 +      if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM)) {
 +              DRM_ERROR("VRAM memory manager not clean. Delaying takedown\n");
 +      }
 +      mutex_unlock(&dev->struct_mutex);
 +      DRM_INFO("[radeon_ms] VRAM memory clean\n");
 +      /* clean memory manager */
 +      drm_bo_driver_finish(dev);
 +      DRM_INFO("[radeon_ms] memory manager clean\n");
 +      /* restore card state */
 +      if (dev_priv->restore_state) {
 +              radeon_ms_state_restore(dev, &dev_priv->load_state);
 +      }
 +      DRM_INFO("[radeon_ms] state restored\n");
 +      /* Release register and framebuffer apertures only if they were
 +       * actually mapped (handle set by the corresponding ioremap). */
 +      if (dev_priv->mmio.handle) {
 +              drm_core_ioremapfree(&dev_priv->mmio, dev);
 +      }
 +      if (dev_priv->vram.handle) {
 +              drm_core_ioremapfree(&dev_priv->vram, dev);
 +      }
 +      DRM_INFO("[radeon_ms] map released\n");
 +      /* dev_priv goes last; nothing may dereference it past this point. */
 +      drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
 +      dev->dev_private = NULL;
 +
 +      DRM_INFO("[radeon_ms] that's all the folks\n");
 +      return 0;
 +}
 +