Passed the compile test; it's ready to ship.
Conflicts:
libdrm/Makefile.am
linux-core/Makefile.kernel
linux-core/drmP.h
linux-core/drm_memrange.c
linux-core/drm_stub.c
shared-core/drm.h
shared-core/i915_dma.c
shared-core/i915_drv.h
shared-core/i915_irq.c
libdrm_la_LDFLAGS = -version-number 2:3:0 -no-undefined
AM_CFLAGS = -I$(top_srcdir)/shared-core
- libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c xf86drmMode.c
+ libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c \
- dri_bufmgr.c
++ xf86drmMode.c dri_bufmgr.c
+ libdrm_la_LIBADD = intel/libdrm_intel.la
libdrmincludedir = ${includedir}
- libdrminclude_HEADERS = xf86drm.h xf86mm.h xf86drmMode.h
-libdrminclude_HEADERS = xf86drm.h xf86mm.h dri_bufmgr.h
++libdrminclude_HEADERS = xf86drm.h xf86mm.h xf86drmMode.h dri_bufmgr.h
EXTRA_DIST = ChangeLog TODO
extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
extern void drmCloseOnce(int fd);
+ extern void drmMsg(const char *format, ...);
+extern int drmSetMaster(int fd);
+extern int drmDropMaster(int fd);
+
#include "xf86mm.h"
#endif
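The new drmSetMaster()/drmDropMaster() entry points let a display server claim and drop DRM-master status around VT switches. A minimal userland sketch (the device path and error handling are illustrative, not part of this patch):

#include <fcntl.h>
#include <stdio.h>
#include "xf86drm.h"

int main(void)
{
	/* Path is illustrative; a real server discovers the node via drmOpen(). */
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0)
		return 1;

	if (drmSetMaster(fd))		/* become DRM master, e.g. on VT enter */
		perror("drmSetMaster");

	/* ... modesetting and command submission happen here ... */

	if (drmDropMaster(fd))		/* relinquish master, e.g. on VT leave */
		perror("drmDropMaster");
	return 0;
}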
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
- drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
+ drm_hashtab.o drm_memrange.o drm_object.o drm_compat.o \
- drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
- drm_regman.o drm_vm_nopage_compat.o drm_gem.o
+ drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_crtc.o \
+ drm_edid.o drm_modes.o drm_bo_lock.o drm_regman.o \
- drm_vm_nopage_compat.o drm_crtc_helper.o
++ drm_vm_nopage_compat.o drm_crtc_helper.o drm_gem.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
- i915_buffer.o i915_execbuf.o \
- i915_buffer.o i915_compat.o i915_execbuf.o i915_gem.o
++ i915_buffer.o i915_execbuf.o i915_gem.o \
+ intel_display.o intel_crt.o intel_lvds.o intel_bios.o \
+ intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
+ intel_tv.o i915_compat.o intel_dvo.o dvo_ch7xxx.o \
+ dvo_ch7017.o dvo_ivch.o dvo_tfp410.o dvo_sil164.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
#define DRIVER_IRQ_SHARED 0x80
#define DRIVER_DMA_QUEUE 0x100
#define DRIVER_FB_DMA 0x200
-#define DRIVER_GEM 0x400
+#define DRIVER_MODESET 0x400
-
++#define DRIVER_GEM 0x800
/*@}*/
struct drm_hash_item hash;
struct drm_map *map; /**< mapping */
uint64_t user_token;
- struct drm_mm_node *file_offset_node;
+ struct drm_master *master; /** if this map is associated with a specific
+ master */
+ struct drm_memrange_node *file_offset_node;
};
typedef struct drm_map drm_local_map_t;
int table_size;
};
+ /**
+ * This structure defines the GEM memory object, which will be used by the
+ * DRM for its buffer objects.
+ */
+ struct drm_gem_object {
+ /** Reference count of this object */
+ struct kref refcount;
+
+ /** Handle count of this object. Each handle also holds a reference */
+ struct kref handlecount;
+
+ /** Related drm device */
+ struct drm_device *dev;
+
+ /** File representing the shmem storage */
+ struct file *filp;
+
+ /**
+ * Size of the object, in bytes. Immutable over the object's
+ * lifetime.
+ */
+ size_t size;
+
+ /**
+ * Global name for this object, starts at 1. 0 means unnamed.
+ * Access is covered by the object_name_lock in the related drm_device
+ */
+ int name;
+
+ /**
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
+ /**
+ * While validating an exec operation, the
+ * new read/write domain values are computed here.
+ * They will be transferred to the above values
+ * at the point that any cache flushing occurs
+ */
+ uint32_t pending_read_domains;
+ uint32_t pending_write_domain;
+
+ void *driver_private;
+ };
+
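Drivers that set DRIVER_GEM are expected to back each object with per-driver state through the gem_init_object()/gem_free_object() hooks below. A hedged sketch of the pattern, using the drm_calloc()/drm_free() helpers seen elsewhere in this series (struct foo_gem_object is illustrative):

/* Illustrative per-object driver state hung off obj->driver_private. */
struct foo_gem_object {
	struct drm_gem_object *obj;
	struct list_head list;		/* e.g. an LRU link */
};

static int foo_gem_init_object(struct drm_gem_object *obj)
{
	struct foo_gem_object *priv;

	priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
	if (priv == NULL)
		return -ENOMEM;

	priv->obj = obj;
	INIT_LIST_HEAD(&priv->list);
	obj->driver_private = priv;
	return 0;
}

static void foo_gem_free_object(struct drm_gem_object *obj)
{
	drm_free(obj->driver_private, sizeof(struct foo_gem_object),
		 DRM_MEM_DRIVER);
	obj->driver_private = NULL;
}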
#include "drm_objects.h"
+#include "drm_crtc.h"
+
+/* per-master structure */
+struct drm_master {
+
+ struct list_head head; /**< each minor contains a list of masters */
+ struct drm_minor *minor; /**< link back to minor we are a master for */
+
+ char *unique; /**< Unique identifier: e.g., busid */
+ int unique_len; /**< Length of unique field */
+
+ int blocked; /**< Blocked due to VC switch? */
+
+ /** \name Authentication */
+ /*@{ */
+ struct drm_open_hash magiclist;
+ struct list_head magicfree;
+ /*@} */
+
+ struct drm_lock_data lock; /**< Information on hardware lock */
+
+ void *driver_priv; /**< Private structure for driver to use */
+};
/**
 * DRM driver structure. This structure represents the common code for
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
+ /* Master routines */
+ int (*master_create)(struct drm_device *dev, struct drm_master *master);
+ void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+ /**
+ * Driver-specific constructor for drm_gem_objects, to set up
+ * obj->driver_private.
+ *
+ * Returns 0 on success.
+ */
+ int (*gem_init_object) (struct drm_gem_object *obj);
+ void (*gem_free_object) (struct drm_gem_object *obj);
+
+ /**
+ * Driver-specific callback to set memory domains from userspace
+ */
+ int (*gem_set_domain) (struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
+ /**
+ * Driver-specific callback to flush pwrite through chipset
+ */
+ int (*gem_flush_pwrite) (struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size);
struct drm_fence_driver *fence_driver;
struct drm_bo_driver *bo_driver;
struct idr drw_idr;
/*@} */
+ /* DRM mode setting */
+ struct drm_mode_config mode_config;
++
+ /** \name GEM information */
+ /*@{ */
+ spinlock_t object_name_lock;
+ struct idr object_name_idr;
+ atomic_t object_count;
+ uint32_t invalidate_domains; /* domains pending invalidation */
+ uint32_t flush_domains; /* domains pending flush */
+ /*@} */
};
#if __OS_HAS_AGP
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
extern void drm_idlelock_take(struct drm_lock_data *lock_data);
extern void drm_idlelock_release(struct drm_lock_data *lock_data);
-extern void drm_client_lock_release(struct drm_device *dev);
+ extern int drm_client_lock_take(struct drm_device *dev,
+ struct drm_file *file_priv);
++extern void drm_client_lock_release(struct drm_device *dev,
++ struct drm_file *file_priv);
/*
* These are exported to drivers so that they can implement fencing using
extern struct class *drm_sysfs_create(struct module *owner, char *name);
extern void drm_sysfs_destroy(void);
extern int drm_sysfs_device_add(struct drm_minor *minor);
+extern void drm_sysfs_hotplug_event(struct drm_device *dev);
extern void drm_sysfs_device_remove(struct drm_minor *minor);
+extern char *drm_get_connector_status_name(enum drm_connector_status status);
+extern int drm_sysfs_connector_add(struct drm_connector *connector);
+extern void drm_sysfs_connector_remove(struct drm_connector *connector);
/*
- * Basic memory manager support (drm_mm.c)
+ * Basic memory manager support (drm_memrange.c)
*/
- extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size,
- unsigned alignment);
- extern void drm_mm_put_block(struct drm_mm_node *cur);
- extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
- unsigned alignment, int best_match);
- extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
- extern void drm_mm_takedown(struct drm_mm *mm);
- extern int drm_mm_clean(struct drm_mm *mm);
- extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
- extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
- extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
- extern void drm_mm_print(struct drm_mm *mm, const char *name);
-
- static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+ extern struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
+ unsigned long size,
+ unsigned alignment);
+ extern void drm_memrange_put_block(struct drm_memrange_node *cur);
+ extern struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange *mm,
+ unsigned long size,
+ unsigned alignment, int best_match);
+ extern int drm_memrange_init(struct drm_memrange *mm,
+ unsigned long start, unsigned long size);
+ extern void drm_memrange_takedown(struct drm_memrange *mm);
+ extern int drm_memrange_clean(struct drm_memrange *mm);
+ extern unsigned long drm_memrange_tail_space(struct drm_memrange *mm);
+ extern int drm_memrange_remove_space_from_tail(struct drm_memrange *mm,
+ unsigned long size);
+ extern int drm_memrange_add_space_to_tail(struct drm_memrange *mm,
+ unsigned long size);
-
+ static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block)
{
return block->mm;
}
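The drm_mm → drm_memrange rename keeps the allocate/search/free pattern intact; i915_gem_object_bind_to_gtt() below uses it exactly this way. A condensed kernel-side sketch of the lifecycle (the range and sizes are illustrative):

struct drm_memrange mm;
struct drm_memrange_node *free_space, *node;

/* Manage a 16 MB range starting at offset 0. */
if (drm_memrange_init(&mm, 0, 16 * 1024 * 1024))
	return -ENOMEM;

/* Find a page-aligned hole big enough for 64 KB... */
free_space = drm_memrange_search_free(&mm, 64 * 1024, PAGE_SIZE, 0);
if (free_space != NULL) {
	/* ...and carve the allocation out of it. */
	node = drm_memrange_get_block(free_space, 64 * 1024, PAGE_SIZE);
	if (node != NULL) {
		/* node->start is the allocated offset within the range. */
		drm_memrange_put_block(node);	/* and free it again */
	}
}

drm_memrange_takedown(&mm);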
current->pid, (long)old_encode_dev(file_priv->minor->device),
dev->open_count);
- if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
- if (drm_i_have_hw_lock(dev, file_priv)) {
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- } else {
- unsigned long _end=jiffies + 3*DRM_HZ;
- int locked = 0;
-
- drm_idlelock_take(&dev->lock);
-
- /*
- * Wait for a while.
- */
-
- do{
- spin_lock_bh(&dev->lock.spinlock);
- locked = dev->lock.idle_has_lock;
- spin_unlock_bh(&dev->lock.spinlock);
- if (locked)
- break;
- schedule();
- } while (!time_after_eq(jiffies, _end));
-
- if (!locked) {
- DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
- "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
- "\tI will go on reclaiming the buffers anyway.\n");
+ /* if the master has gone away we can't do anything with the lock */
+ if (file_priv->minor->master) {
+ if (dev->driver->reclaim_buffers_locked && file_priv->master->lock.hw_lock) {
+ if (drm_i_have_hw_lock(dev, file_priv)) {
+ dev->driver->reclaim_buffers_locked(dev, file_priv);
+ } else {
+ unsigned long _end=jiffies + 3*DRM_HZ;
+ int locked = 0;
+
+ drm_idlelock_take(&file_priv->master->lock);
+
+ /*
+ * Wait for a while.
+ */
+
+ do{
+ spin_lock_bh(&file_priv->master->lock.spinlock);
+ locked = file_priv->master->lock.idle_has_lock;
+ spin_unlock_bh(&file_priv->master->lock.spinlock);
+ if (locked)
+ break;
+ schedule();
+ } while (!time_after_eq(jiffies, _end));
+
+ if (!locked) {
+ DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+ "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+ "\tI will go on reclaiming the buffers anyway.\n");
+ }
+
+ dev->driver->reclaim_buffers_locked(dev, file_priv);
+ drm_idlelock_release(&file_priv->master->lock);
}
-
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- drm_idlelock_release(&dev->lock);
}
- }
-
- if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
-
- drm_idlelock_take(&dev->lock);
- dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
- drm_idlelock_release(&dev->lock);
- }
+ if (dev->driver->reclaim_buffers_idlelocked && file_priv->master->lock.hw_lock) {
+
+ drm_idlelock_take(&file_priv->master->lock);
+ dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
+ drm_idlelock_release(&file_priv->master->lock);
+
+ }
- if (drm_i_have_hw_lock(dev, file_priv)) {
- DRM_DEBUG("File %p released, freeing lock for context %d\n",
- filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
- drm_lock_free(&dev->lock,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
- }
+ if (drm_i_have_hw_lock(dev, file_priv)) {
+ DRM_DEBUG("File %p released, freeing lock for context %d\n",
+ filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+
+ drm_lock_free(&file_priv->master->lock,
+ _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+ }
+
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !dev->driver->reclaim_buffers_locked) {
- dev->driver->reclaim_buffers(dev, file_priv);
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+ !dev->driver->reclaim_buffers_locked) {
+ dev->driver->reclaim_buffers(dev, file_priv);
+ }
}
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_release(dev, file_priv);
+
drm_fasync(-1, filp, 0);
mutex_lock(&dev->ctxlist_mutex);
}
EXPORT_SYMBOL(drm_idlelock_release);
- * of data structures (such as command ringbuffer) shared with the X Server, and
+ /**
+ * Takes the lock on behalf of the client if needed, using the kernel context.
+ *
+ * This allows us to hide the hardware lock when it's required for protection
- spin_lock_irqsave(&dev->lock.spinlock, irqflags);
- dev->lock.user_waiters++;
- spin_unlock_irqrestore(&dev->lock.spinlock, irqflags);
- ret = wait_event_interruptible(dev->lock.lock_queue,
- drm_lock_take(&dev->lock,
++ * of data structures (such as command ringbuffer) shared with the X Server, and
+ * gives us a way to transition to lockless for those requests when the X Server
+ * stops accessing the ringbuffer directly, without having to update the
+ * other userland clients.
+ */
+ int drm_client_lock_take(struct drm_device *dev, struct drm_file *file_priv)
+ {
++ struct drm_master *master = file_priv->master;
+ int ret;
+ unsigned long irqflags;
+
+ /* If the client has the lock, we're already done. */
+ if (drm_i_have_hw_lock(dev, file_priv))
+ return 0;
+
+ mutex_unlock (&dev->struct_mutex);
+ /* Client doesn't hold the lock. Block taking the lock with the kernel
+ * context on behalf of the client, and return whether we were
+ * successful.
+ */
- spin_lock_irqsave(&dev->lock.spinlock, irqflags);
- dev->lock.user_waiters--;
++ spin_lock_irqsave(&master->lock.spinlock, irqflags);
++ master->lock.user_waiters++;
++ spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
++ ret = wait_event_interruptible(master->lock.lock_queue,
++ drm_lock_take(&master->lock,
+ DRM_KERNEL_CONTEXT));
- spin_unlock_irqrestore(&dev->lock.spinlock, irqflags);
++ spin_lock_irqsave(&master->lock.spinlock, irqflags);
++ master->lock.user_waiters--;
+ if (ret != 0) {
- dev->lock.file_priv = file_priv;
- dev->lock.lock_time = jiffies;
- dev->lock.kernel_held = 1;
++ spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
+ } else {
- spin_unlock_irqrestore(&dev->lock.spinlock, irqflags);
++ master->lock.file_priv = file_priv;
++ master->lock.lock_time = jiffies;
++ master->lock.kernel_held = 1;
+ file_priv->lock_count++;
-void drm_client_lock_release(struct drm_device *dev)
++ spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
+ }
+ mutex_lock (&dev->struct_mutex);
+ return ret;
+ }
+ EXPORT_SYMBOL(drm_client_lock_take);
+
- if (dev->lock.kernel_held) {
- dev->lock.kernel_held = 0;
- dev->lock.file_priv = NULL;
- drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
++void drm_client_lock_release(struct drm_device *dev, struct drm_file *file_priv)
+ {
++ struct drm_master *master = file_priv->master;
++
++ if (master->lock.kernel_held) {
++ master->lock.kernel_held = 0;
++ master->lock.file_priv = NULL;
++ drm_lock_free(&master->lock, DRM_KERNEL_CONTEXT);
+ }
+ }
+ EXPORT_SYMBOL(drm_client_lock_release);
-
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
-
- return (file_priv->lock_count && dev->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
- dev->lock.file_priv == file_priv);
+ struct drm_master *master = file_priv->master;
+ return (file_priv->lock_count && master->lock.hw_lock &&
+ _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
+ master->lock.file_priv == file_priv);
}
EXPORT_SYMBOL(drm_i_have_hw_lock);
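Callers bracket lock-protected work with the new pair; note drm_client_lock_take() expects dev->struct_mutex held (it drops and retakes it internally while blocking). A hedged sketch of a driver path using it:

/* Sketch: a driver ioctl path that must briefly own the HW lock.
 * Called with dev->struct_mutex already held, as the helper expects.
 */
static int foo_do_locked_work(struct drm_device *dev,
			      struct drm_file *file_priv)
{
	int ret;

	ret = drm_client_lock_take(dev, file_priv);
	if (ret)
		return ret;	/* interrupted while waiting for the lock */

	/* ... touch the ringbuffer or other lock-protected state ... */

	drm_client_lock_release(dev, file_priv);
	return 0;
}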
list_del(&entry->ml_entry);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
}
-
- EXPORT_SYMBOL(drm_mm_takedown);
-
- void drm_mm_print(struct drm_mm *mm, const char *name)
- {
- struct list_head *list;
- const struct list_head *mm_stack = &mm->ml_entry;
- struct drm_mm_node *entry;
--
- DRM_DEBUG("Memory usage for '%s'\n", name ? name : "unknown");
- list_for_each(list, mm_stack) {
- entry = list_entry(list, struct drm_mm_node, ml_entry);
- DRM_DEBUG("\t0x%08lx %li %s pages\n", entry->start, entry->size,
- entry->free ? "free" : "used");
- }
- }
- EXPORT_SYMBOL(drm_mm_print);
+ EXPORT_SYMBOL(drm_memrange_takedown);
dev->irq = pdev->irq;
dev->irq_enabled = 0;
- if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
+ if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER))
return -ENOMEM;
- }
+
- if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE)) {
+ if (drm_memrange_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_ht_remove(&dev->map_hash);
return -ENOMEM;
}
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
+ .master_create = i915_master_create,
+ .master_destroy = i915_master_destroy,
.ioctls = i915_ioctls,
+ .gem_init_object = i915_gem_init_object,
+ .gem_free_object = i915_gem_free_object,
+ .gem_set_domain = i915_gem_set_domain,
+ .gem_flush_pwrite = i915_gem_flush_pwrite,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
--- /dev/null
- drm_i915_private_t *dev_priv = dev->dev_private;
+ /*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+ #include "drmP.h"
+ #include "drm.h"
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+
+ #define WATCH_COHERENCY 0
+ #define WATCH_BUF 0
+ #define WATCH_EXEC 0
+ #define WATCH_LRU 0
+ #define WATCH_RELOC 0
+
+ static int
+ i915_gem_object_set_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
+ int
+ i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_init *args = data;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (args->gtt_start >= args->gtt_end ||
+ (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
+ (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ drm_memrange_init(&dev_priv->mm.gtt_space, args->gtt_start,
+ args->gtt_end - args->gtt_start);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
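From userspace, initializing GEM is just a matter of handing the driver a page-aligned GTT range to manage. A hedged sketch, assuming the DRM_I915_GEM_INIT command number and struct drm_i915_gem_init layout from shared-core/i915_drm.h in this series:

#include <stdint.h>
#include "xf86drm.h"
#include "i915_drm.h"

/* Tell the kernel which GTT range GEM may allocate from. Both bounds
 * must be page aligned or the ioctl above returns -EINVAL.
 */
static int foo_gem_init(int fd, uint64_t gtt_start, uint64_t gtt_end)
{
	struct drm_i915_gem_init init;

	init.gtt_start = gtt_start;
	init.gtt_end = gtt_end;
	return drmCommandWrite(fd, DRM_I915_GEM_INIT, &init, sizeof(init));
}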
+
+ static void
+ i915_gem_object_free_page_list(struct drm_gem_object *obj)
+ {
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page_count = obj->size / PAGE_SIZE;
+ int i;
+
+ if (obj_priv->page_list == NULL)
+ return;
+
+ for (i = 0; i < page_count; i++)
+ if (obj_priv->page_list[i] != NULL)
+ page_cache_release(obj_priv->page_list[i]);
+
+ drm_free(obj_priv->page_list,
+ page_count * sizeof(struct page *),
+ DRM_MEM_DRIVER);
+ obj_priv->page_list = NULL;
+ }
+
+ static void
+ i915_gem_object_move_to_active(struct drm_gem_object *obj)
+ {
+ struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ /* Add a reference if we're newly entering the active list. */
+ if (!obj_priv->active) {
+ drm_gem_object_reference(obj);
+ obj_priv->active = 1;
+ }
+ /* Move from whatever list we were on to the tail of execution. */
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.active_list);
+ }
+
+ static void
+ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+ {
+ struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ if (obj_priv->pin_count != 0)
+ list_del_init(&obj_priv->list);
+ else
+ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+ if (obj_priv->active) {
+ obj_priv->active = 0;
+ drm_gem_object_unreference(obj);
+ }
+ }
+
+ /**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
+ *
+ * Must be called with struct_lock held.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+ static uint32_t
+ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
+ {
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(I915_GEM_HWS_INDEX << STORE_DWORD_INDEX_SHIFT);
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
+ uint32_t seqno;
+ RING_LOCALS;
+
+ request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
+ if (request == NULL)
+ return 0;
+
+ /* Grab the seqno we're going to make this request be, and bump the
+ * next (skipping 0 so it can be the reserved no-seqno value).
+ */
+ seqno = dev_priv->mm.next_gem_seqno;
+ dev_priv->mm.next_gem_seqno++;
+ if (dev_priv->mm.next_gem_seqno == 0)
+ dev_priv->mm.next_gem_seqno++;
+
+ BEGIN_LP_RING(4);
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
++ OUT_RING(MI_STORE_DWORD_INDEX);
++ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+
+ OUT_RING(GFX_OP_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+
+ DRM_DEBUG("%d\n", seqno);
+
+ request->seqno = seqno;
+ request->emitted_jiffies = jiffies;
+ request->flush_domains = flush_domains;
+ if (list_empty(&dev_priv->mm.request_list))
+ mod_timer(&dev_priv->mm.retire_timer, jiffies + HZ);
+
+ list_add_tail(&request->list, &dev_priv->mm.request_list);
+
+ return seqno;
+ }
+
+ /**
+ * Command execution barrier
+ *
+ * Ensures that all commands in the ring are finished
+ * before signalling the CPU
+ */
+
+ uint32_t
+ i915_retire_commands(struct drm_device *dev)
+ {
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ uint32_t flush_domains = 0;
+ RING_LOCALS;
+
+ /* The sampler always gets flushed on i965 (sigh) */
+ if (IS_I965G(dev))
+ flush_domains |= DRM_GEM_DOMAIN_I915_SAMPLER;
+ BEGIN_LP_RING(2);
+ OUT_RING(cmd);
+ OUT_RING(0); /* noop */
+ ADVANCE_LP_RING();
+ return flush_domains;
+ }
+
+ /**
+ * Moves buffers associated only with the given active seqno from the active
+ * to inactive list, potentially freeing them.
+ */
+ static void
+ i915_gem_retire_request(struct drm_device *dev,
+ struct drm_i915_gem_request *request)
+ {
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (request->flush_domains != 0) {
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ /* First clear any buffers that were only waiting for a flush
+ * matching the one just retired.
+ */
+
+ list_for_each_entry_safe(obj_priv, next,
+ &dev_priv->mm.flushing_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if (obj->write_domain & request->flush_domains) {
+ obj->write_domain = 0;
+ i915_gem_object_move_to_inactive(obj);
+ }
+ }
+
+ }
+
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate.
+ */
+ while (!list_empty(&dev_priv->mm.active_list)) {
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ /* If the seqno being retired doesn't match the oldest in the
+ * list, then the oldest in the list must still be newer than
+ * this seqno.
+ */
+ if (obj_priv->last_rendering_seqno != request->seqno)
+ return;
+ #if WATCH_LRU
+ DRM_INFO("%s: retire %d moves to inactive list %p\n",
+ __func__, request->seqno, obj);
+ #endif
+
+ if (obj->write_domain != 0) {
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.flushing_list);
+ } else {
+ i915_gem_object_move_to_inactive(obj);
+ }
+ }
+ }
+
+ /**
+ * Returns true if seq1 is later than seq2.
+ */
+ static int
+ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+ {
+ return (int32_t)(seq1 - seq2) >= 0;
+ }
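The signed-difference comparison stays correct across 32-bit seqno wraparound, which a plain seq1 >= seq2 would not. A standalone illustration of the trick:

#include <assert.h>
#include <stdint.h>

/* Same comparison as i915_seqno_passed(), exercised across wraparound. */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(5, 3));		/* the ordinary case */
	assert(seqno_passed(2, 0xfffffffeu));	/* 2 - 0xfffffffe wraps to +4 */
	assert(!seqno_passed(0xfffffffeu, 2));	/* and the reverse is false */
	return 0;
}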
+
+ static uint32_t
+ i915_get_gem_seqno(struct drm_device *dev)
+ {
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+ }
+
+ /**
+ * This function clears the request list as sequence numbers are passed.
+ */
+ void
+ i915_gem_retire_requests(struct drm_device *dev)
+ {
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t seqno;
+
+ seqno = i915_get_gem_seqno(dev);
+
+ while (!list_empty(&dev_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+ uint32_t retiring_seqno;
+
+ request = list_first_entry(&dev_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ list);
+ retiring_seqno = request->seqno;
+
+ if (i915_seqno_passed(seqno, retiring_seqno)) {
+ i915_gem_retire_request(dev, request);
+
+ list_del(&request->list);
+ drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
+ } else
+ break;
+ }
+ }
+
+ void
+ i915_gem_retire_timeout(unsigned long data)
+ {
+ struct drm_device *dev = (struct drm_device *) data;
- drm_i915_private_t *dev_priv;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ schedule_work(&dev_priv->mm.retire_task);
+ }
+
+ void
+ i915_gem_retire_handler(struct work_struct *work)
+ {
- dev_priv = container_of(work, drm_i915_private_t,
++ struct drm_i915_private *dev_priv;
+ struct drm_device *dev;
+
- drm_i915_private_t *dev_priv = dev->dev_private;
++ dev_priv = container_of(work, struct drm_i915_private,
+ mm.retire_task);
+ dev = dev_priv->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_retire_requests(dev);
+ if (!list_empty(&dev_priv->mm.request_list))
+ mod_timer(&dev_priv->mm.retire_timer, jiffies + HZ);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ /**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+ int
+ i915_wait_request(struct drm_device *dev, uint32_t seqno)
+ {
- i915_user_irq_on(dev_priv);
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ BUG_ON(seqno == 0);
+
+ if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- i915_user_irq_off(dev_priv);
++ i915_user_irq_on(dev);
+ ret = wait_event_interruptible(dev_priv->irq_queue,
+ i915_seqno_passed(i915_get_gem_seqno(dev),
+ seqno));
- drm_i915_private_t *dev_priv = dev->dev_private;
++ i915_user_irq_off(dev);
+ }
+ if (ret)
+ DRM_ERROR ("%s returns %d (awaiting %d at %d)\n",
+ __func__, ret, seqno, i915_get_gem_seqno(dev));
+
+ /* Directly dispatch request retiring. While we have the work queue
+ * to handle this, the waiter on a request often wants an associated
+ * buffer to have made it to the inactive list, and we would need
+ * a separate wait queue to handle that.
+ */
+ if (ret == 0)
+ i915_gem_retire_requests(dev);
+
+ return ret;
+ }
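Together, i915_add_request() and i915_wait_request() form the request lifecycle that execbuffer relies on below: emit work, record a seqno, block on it. A condensed sketch (the caller must hold dev->struct_mutex; foo_emit_and_wait is illustrative):

static int foo_emit_and_wait(struct drm_device *dev, uint32_t flush_domains)
{
	uint32_t seqno;

	/* Records a request and emits the seqno write plus interrupt. */
	seqno = i915_add_request(dev, flush_domains);
	if (seqno == 0)		/* request allocation failed */
		return -ENOMEM;

	/* Sleeps on the user interrupt, then retires finished requests. */
	return i915_wait_request(dev, seqno);
}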
+
+ static void
+ i915_gem_flush(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+ {
- cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t cmd;
+ RING_LOCALS;
+
+ #if WATCH_EXEC
+ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+ invalidate_domains, flush_domains);
+ #endif
+
+ if (flush_domains & DRM_GEM_DOMAIN_CPU)
+ drm_agp_chipset_flush(dev);
+
+ if ((invalidate_domains|flush_domains) & ~DRM_GEM_DOMAIN_CPU) {
+ /*
+ * read/write caches:
+ *
+ * DRM_GEM_DOMAIN_I915_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * DRM_GEM_DOMAIN_I915_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * DRM_GEM_DOMAIN_I915_COMMAND may not exist?
+ *
+ * DRM_GEM_DOMAIN_I915_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * DRM_GEM_DOMAIN_I915_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with DRM_GEM_DOMAIN_I915_COMMAND
+ * and DRM_GEM_DOMAIN_CPU are invalidated at PTE write, and
+ * DRM_GEM_DOMAIN_I915_RENDER and DRM_GEM_DOMAIN_I915_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
- drm_i915_private_t *dev_priv = dev->dev_private;
++ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) &
+ DRM_GEM_DOMAIN_I915_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (!IS_I965G(dev)) {
+ /*
+ * On the 965, the sampler cache always gets flushed
+ * and this bit is reserved.
+ */
+ if (invalidate_domains & DRM_GEM_DOMAIN_I915_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+ }
+ if (invalidate_domains & DRM_GEM_DOMAIN_I915_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+ #if WATCH_EXEC
+ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+ #endif
+ BEGIN_LP_RING(2);
+ OUT_RING(cmd);
+ OUT_RING(0); /* noop */
+ ADVANCE_LP_RING();
+ }
+ }
+
+ /**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+ static int
+ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+ {
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret;
+
+ /* If there are writes queued to the buffer, flush and
+ * create a new seqno to wait for.
+ */
+ if (obj->write_domain & ~(DRM_GEM_DOMAIN_CPU)) {
+ uint32_t write_domain = obj->write_domain;
+ #if WATCH_BUF
+ DRM_INFO("%s: flushing object %p from write domain %08x\n",
+ __func__, obj, write_domain);
+ #endif
+ i915_gem_flush(dev, 0, write_domain);
+ obj->write_domain = 0;
+
+ i915_gem_object_move_to_active(obj);
+ obj_priv->last_rendering_seqno = i915_add_request(dev,
+ write_domain);
+ BUG_ON(obj_priv->last_rendering_seqno == 0);
+ #if WATCH_LRU
+ DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
+ #endif
+ }
+ /* If there is rendering queued on the buffer being evicted, wait for
+ * it.
+ */
+ if (obj_priv->active) {
+ #if WATCH_BUF
+ DRM_INFO("%s: object %p wait for seqno %08x\n",
+ __func__, obj, obj_priv->last_rendering_seqno);
+ #endif
+ ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /**
+ * Unbinds an object from the GTT aperture.
+ */
+ static int
+ i915_gem_object_unbind(struct drm_gem_object *obj)
+ {
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret = 0;
+
+ #if WATCH_BUF
+ DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
+ DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
+ #endif
+ if (obj_priv->gtt_space == NULL)
+ return 0;
+
+ /* Move the object to the CPU domain to ensure that
+ * any possible CPU writes while it's not in the GTT
+ * are flushed when we go to remap it. This will
+ * also ensure that all pending GPU writes are finished
+ * before we unbind.
+ */
+ ret = i915_gem_object_set_domain (obj, DRM_GEM_DOMAIN_CPU,
+ DRM_GEM_DOMAIN_CPU);
+ if (ret)
+ return ret;
+
+ if (obj_priv->agp_mem != NULL) {
+ drm_unbind_agp(obj_priv->agp_mem);
+ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+ obj_priv->agp_mem = NULL;
+ }
+
+ i915_gem_object_free_page_list(obj);
+
+ drm_memrange_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+
+ /* Remove ourselves from the LRU list if present. */
+ if (!list_empty(&obj_priv->list)) {
+ list_del_init(&obj_priv->list);
+ if (obj_priv->active) {
+ DRM_ERROR("Failed to wait on buffer when unbinding, "
+ "continued anyway.\n");
+ obj_priv->active = 0;
+ drm_gem_object_unreference(obj);
+ }
+ }
+ return 0;
+ }
+
+ #if WATCH_BUF | WATCH_EXEC
+ static void
+ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
+ uint32_t bias, uint32_t mark)
+ {
+ uint32_t *mem = kmap_atomic(page, KM_USER0);
+ int i;
+ for (i = start; i < end; i += 4)
+ DRM_INFO("%08x: %08x%s\n",
+ (int) (bias + i), mem[i / 4],
+ (bias + i == mark) ? " ********" : "");
+ kunmap_atomic(mem, KM_USER0);
+ /* give syslog time to catch up */
+ msleep(1);
+ }
+
+ static void
+ i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ const char *where, uint32_t mark)
+ {
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page;
+
+ DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+ for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
+ int page_len, chunk, chunk_len;
+
+ page_len = len - page * PAGE_SIZE;
+ if (page_len > PAGE_SIZE)
+ page_len = PAGE_SIZE;
+
+ for (chunk = 0; chunk < page_len; chunk += 128) {
+ chunk_len = page_len - chunk;
+ if (chunk_len > 128)
+ chunk_len = 128;
+ i915_gem_dump_page(obj_priv->page_list[page],
+ chunk, chunk + chunk_len,
+ obj_priv->gtt_offset +
+ page * PAGE_SIZE,
+ mark);
+ }
+ }
+ }
+ #endif
+
+ #if WATCH_LRU
+ static void
+ i915_dump_lru(struct drm_device *dev, const char *where)
+ {
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+
+ DRM_INFO("active list %s {\n", where);
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+ list)
+ {
+ DRM_INFO(" %p: %08x\n", obj_priv,
+ obj_priv->last_rendering_seqno);
+ }
+ DRM_INFO("}\n");
+ DRM_INFO("flushing list %s {\n", where);
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+ list)
+ {
+ DRM_INFO(" %p: %08x\n", obj_priv,
+ obj_priv->last_rendering_seqno);
+ }
+ DRM_INFO("}\n");
+ DRM_INFO("inactive %s {\n", where);
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ DRM_INFO(" %p: %08x\n", obj_priv,
+ obj_priv->last_rendering_seqno);
+ }
+ DRM_INFO("}\n");
+ }
+ #endif
+
+ static int
+ i915_gem_evict_something(struct drm_device *dev)
+ {
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ for (;;) {
+ /* If there's an inactive buffer available now, grab it
+ * and be done.
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list)) {
+ obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+ BUG_ON(obj_priv->pin_count != 0);
+ break;
+ }
+
+ /* If we didn't get anything, but the ring is still processing
+ * things, wait for one of those things to finish and hopefully
+ * leave us a buffer to evict.
+ */
+ if (!list_empty(&dev_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+ int ret;
+
+ request = list_first_entry(&dev_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ ret = i915_wait_request(dev, request->seqno);
+ if (ret != 0)
+ return ret;
+
+ continue;
+ }
+
+ /* If we didn't have anything on the request list but there
+ * are buffers awaiting a flush, emit one and try again.
+ * When we wait on it, those buffers waiting for that flush
+ * will get moved to inactive.
+ */
+ if (!list_empty(&dev_priv->mm.flushing_list)) {
+ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ i915_gem_flush(dev,
+ obj->write_domain,
+ obj->write_domain);
+ i915_add_request(dev, obj->write_domain);
+
+ obj = NULL;
+ continue;
+ }
+
+ /* If we didn't do any of the above, there's nothing to be done
+ * and we just can't fit it in.
+ */
+ return -ENOMEM;
+ }
+
+ #if WATCH_LRU
+ DRM_INFO("%s: evicting %p\n", __func__, obj);
+ #endif
+
+ BUG_ON(obj_priv->active);
+
+ /* Wait on the rendering and unbind the buffer. */
+ ret = i915_gem_object_unbind(obj);
+
+ return ret;
+ }
+
+ static int
+ i915_gem_object_get_page_list(struct drm_gem_object *obj)
+ {
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page_count, i;
+ if (obj_priv->page_list)
+ return 0;
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ */
+ page_count = obj->size / PAGE_SIZE;
+ BUG_ON(obj_priv->page_list != NULL);
+ obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
+ DRM_MEM_DRIVER);
+ if (obj_priv->page_list == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < page_count; i++) {
+ obj_priv->page_list[i] =
+ find_or_create_page(obj->filp->f_mapping, i, GFP_HIGHUSER);
+
+ if (obj_priv->page_list[i] == NULL) {
+ i915_gem_object_free_page_list(obj);
+ return -ENOMEM;
+ }
+ unlock_page(obj_priv->page_list[i]);
+ }
+ return 0;
+ }
+
+ /**
+ * Finds free space in the GTT aperture and binds the object there.
+ */
+ static int
+ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ {
+ struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_memrange_node *free_space;
+ int page_count, ret;
+
+ if (alignment == 0)
+ alignment = PAGE_SIZE;
+ if (alignment & (PAGE_SIZE - 1)) {
+ DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+ return -EINVAL;
+ }
+
+ search_free:
+ free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space,
+ obj->size,
+ alignment, 0);
+ if (free_space != NULL) {
+ obj_priv->gtt_space =
+ drm_memrange_get_block(free_space, obj->size,
+ alignment);
+ if (obj_priv->gtt_space != NULL) {
+ obj_priv->gtt_space->private = obj;
+ obj_priv->gtt_offset = obj_priv->gtt_space->start;
+ }
+ }
+ if (obj_priv->gtt_space == NULL) {
+ /* If the gtt is empty and we're still having trouble
+ * fitting our object in, we're out of memory.
+ */
+ #if WATCH_LRU
+ DRM_INFO("%s: GTT full, evicting something\n", __func__);
+ #endif
+ if (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.active_list)) {
+ DRM_ERROR("GTT full, but LRU list empty\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_evict_something(dev);
+ if (ret != 0)
+ return ret;
+ goto search_free;
+ }
+
+ #if WATCH_BUF
+ DRM_INFO("Binding object of size %d at 0x%08x\n",
+ obj->size, obj_priv->gtt_offset);
+ #endif
+ ret = i915_gem_object_get_page_list(obj);
+ if (ret) {
+ drm_memrange_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ return ret;
+ }
+
+ page_count = obj->size / PAGE_SIZE;
+ /* Create an AGP memory structure pointing at our pages, and bind it
+ * into the GTT.
+ */
+ obj_priv->agp_mem = drm_agp_bind_pages(dev,
+ obj_priv->page_list,
+ page_count,
+ obj_priv->gtt_offset);
+ if (obj_priv->agp_mem == NULL) {
+ i915_gem_object_free_page_list(obj);
+ drm_memrange_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ return -ENOMEM;
+ }
+
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ BUG_ON(obj->read_domains & ~DRM_GEM_DOMAIN_CPU);
+ BUG_ON(obj->write_domain & ~DRM_GEM_DOMAIN_CPU);
+
+ return 0;
+ }
+
+ static void
+ i915_gem_clflush_object(struct drm_gem_object *obj)
+ {
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ /* If we don't have a page list set up, then we're not pinned
+ * to GPU, and we can ignore the cache flush because it'll happen
+ * again at bind time.
+ */
+ if (obj_priv->page_list == NULL)
+ return;
+
+ drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
+ }
+
+ /*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invaliding though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Mapped to GTT
+ * 4. Read by GPU
+ * 5. Unmapped from GTT
+ * 6. Freed
+ *
+ * Let's take these a step at a time
+ *
+ * 1. Allocated
+ * Pages allocated from the kernel may still have
+ * cache contents, so we set them to (CPU, CPU) always.
+ * 2. Written by CPU (using pwrite)
+ * The pwrite function calls set_domain (CPU, CPU) and
+ * this function does nothing (as nothing changes)
+ * 3. Mapped by GTT
+ * This function asserts that the object is not
+ * currently in any GPU-based read or write domains
+ * 4. Read by GPU
+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ * As write_domain is zero, this function adds in the
+ * current read domains (CPU+COMMAND, 0).
+ * flush_domains is set to CPU.
+ * invalidate_domains is set to COMMAND
+ * clflush is run to get data out of the CPU caches
+ * then i915_dev_set_domain calls i915_gem_flush to
+ * emit an MI_FLUSH and drm_agp_chipset_flush
+ * 5. Unmapped from GTT
+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
+ * flush_domains and invalidate_domains end up both zero
+ * so no flushing/invalidating happens
+ * 6. Freed
+ * yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ * 1. Allocated
+ * 2. Mapped to GTT
+ * 3. Read/written by GPU
+ * 4. set_domain to (CPU,CPU)
+ * 5. Read/written by CPU
+ * 6. Read/written by GPU
+ *
+ * 1. Allocated
+ * Same as last example, (CPU, CPU)
+ * 2. Mapped to GTT
+ * Nothing changes (assertions find that it is not in the GPU)
+ * 3. Read/written by GPU
+ * execbuffer calls set_domain (RENDER, RENDER)
+ * flush_domains gets CPU
+ * invalidate_domains gets GPU
+ * clflush (obj)
+ * MI_FLUSH and drm_agp_chipset_flush
+ * 4. set_domain (CPU, CPU)
+ * flush_domains gets GPU
+ * invalidate_domains gets CPU
+ * wait_rendering (obj) to make sure all drawing is complete.
+ * This will include an MI_FLUSH to get the data from GPU
+ * to memory
+ * clflush (obj) to invalidate the CPU cache
+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ * 5. Read/written by CPU
+ * cache lines are loaded and dirtied
+ * 6. Read/written by GPU
+ * Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Read by GPU
+ * 4. Updated (written) by CPU again
+ * 5. Read by GPU
+ *
+ * 1. Allocated
+ * (CPU, CPU)
+ * 2. Written by CPU
+ * (CPU, CPU)
+ * 3. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ * 4. Updated (written) by CPU again
+ * (CPU, CPU)
+ * flush_domains = 0 (no previous write domain)
+ * invalidate_domains = 0 (no new read domains)
+ * 5. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ */
+ static int
+ i915_gem_object_set_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain)
+ {
+ struct drm_device *dev = obj->dev;
+ uint32_t invalidate_domains = 0;
+ uint32_t flush_domains = 0;
+ int ret;
+
+ #if WATCH_BUF
+ DRM_INFO("%s: object %p read %08x write %08x\n",
+ __func__, obj, read_domains, write_domain);
+ #endif
+ /*
+ * If the object isn't moving to a new write domain,
+ * let the object stay in multiple read domains
+ */
+ if (write_domain == 0)
+ read_domains |= obj->read_domains;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain
+ */
+ if (obj->write_domain && obj->write_domain != read_domains) {
+ flush_domains |= obj->write_domain;
+ invalidate_domains |= read_domains & ~obj->write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= read_domains & ~obj->read_domains;
+ if ((flush_domains | invalidate_domains) & DRM_GEM_DOMAIN_CPU) {
+ #if WATCH_BUF
+ DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
+ __func__, flush_domains, invalidate_domains);
+ #endif
+ /*
+ * If we're invalidating the CPU cache and flushing a GPU cache,
+ * then pause for rendering so that the GPU caches will be
+ * flushed before the cpu cache is invalidated
+ */
+ if ((invalidate_domains & DRM_GEM_DOMAIN_CPU) &&
+ (flush_domains & ~DRM_GEM_DOMAIN_CPU)) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+ }
+ i915_gem_clflush_object(obj);
+ }
+
+ if ((write_domain | flush_domains) != 0)
+ obj->write_domain = write_domain;
+ obj->read_domains = read_domains;
+ dev->invalidate_domains |= invalidate_domains;
+ dev->flush_domains |= flush_domains;
+ return 0;
+ }
+
+ /**
+ * Once all of the objects have been set in the proper domain,
+ * perform the necessary flush and invalidate operations.
+ *
+ * Returns the write domains flushed, for use in flush tracking.
+ */
+ static uint32_t
+ i915_gem_dev_set_domain(struct drm_device *dev)
+ {
+ uint32_t flush_domains = dev->flush_domains;
+
+ /*
+ * Now that all the buffers are synced to the proper domains,
+ * flush and invalidate the collected domains
+ */
+ if (dev->invalidate_domains | dev->flush_domains) {
+ #if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ dev->invalidate_domains,
+ dev->flush_domains);
+ #endif
+ i915_gem_flush(dev,
+ dev->invalidate_domains,
+ dev->flush_domains);
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+ }
+
+ return flush_domains;
+ }
+
+ #if WATCH_COHERENCY
+ static void
+ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+ {
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page;
+ uint32_t *gtt_mapping;
+ uint32_t *backing_map = NULL;
+ int bad_count = 0;
+
+ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
+ __func__, obj, obj_priv->gtt_offset, handle,
+ obj->size / 1024);
+
+ gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
+ obj->size);
+ if (gtt_mapping == NULL) {
+ DRM_ERROR("failed to map GTT space\n");
+ return;
+ }
+
+ for (page = 0; page < obj->size / PAGE_SIZE; page++) {
+ int i;
+
+ backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
+
+ if (backing_map == NULL) {
+ DRM_ERROR("failed to map backing page\n");
+ goto out;
+ }
+
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ uint32_t cpuval = backing_map[i];
+ uint32_t gttval = readl(gtt_mapping +
+ page * 1024 + i);
+
+ if (cpuval != gttval) {
+ DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
+ "0x%08x vs 0x%08x\n",
+ (int)(obj_priv->gtt_offset +
+ page * PAGE_SIZE + i * 4),
+ cpuval, gttval);
+ if (bad_count++ >= 8) {
+ DRM_INFO("...\n");
+ goto out;
+ }
+ }
+ }
+ kunmap_atomic(backing_map, KM_USER0);
+ backing_map = NULL;
+ }
+
+ out:
+ if (backing_map != NULL)
+ kunmap_atomic(backing_map, KM_USER0);
+ iounmap(gtt_mapping);
+
+ /* give syslog time to catch up */
+ msleep(1);
+
+ /* Directly flush the object, since we just loaded values with the CPU
+ * from the backing pages and we don't want to disturb the cache
+ * management that we're trying to observe.
+ */
+
+ i915_gem_clflush_object(obj);
+ }
+ #endif
+
+ /**
+ * Bind an object to the GTT and evaluate the relocations landing in it
+ *
+ *
+ */
+ static int
+ i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object *entry)
+ {
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_relocation_entry reloc;
+ struct drm_i915_gem_relocation_entry __user *relocs;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int i;
+ uint32_t last_reloc_offset = -1;
+ void *reloc_page = NULL;
+
+ /* Choose the GTT offset for our buffer and put it there. */
+ if (obj_priv->gtt_space == NULL) {
+ i915_gem_object_bind_to_gtt(obj, (unsigned) entry->alignment);
+ if (obj_priv->gtt_space == NULL)
+ return -ENOMEM;
+ }
+
+ entry->offset = obj_priv->gtt_offset;
+
+ relocs = (struct drm_i915_gem_relocation_entry __user *)
+ (uintptr_t) entry->relocs_ptr;
+ /* Apply the relocations, using the GTT aperture to avoid cache
+ * flushing requirements.
+ */
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_obj_priv;
+ uint32_t reloc_val, reloc_offset, *reloc_entry;
+ int ret;
+
+ ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
+ if (ret != 0)
+ return ret;
+
+ target_obj = drm_gem_object_lookup(obj->dev, file_priv,
+ reloc.target_handle);
+ if (target_obj == NULL)
+ return -EINVAL;
+ target_obj_priv = target_obj->driver_private;
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (target_obj_priv->gtt_space == NULL) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc.target_handle);
+ drm_gem_object_unreference(target_obj);
+ return -EINVAL;
+ }
+
+ if (reloc.offset > obj->size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset, (int) obj->size);
+ drm_gem_object_unreference(target_obj);
+ return -EINVAL;
+ }
+ if (reloc.offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset);
+ drm_gem_object_unreference(target_obj);
+ return -EINVAL;
+ }
+
+ if (reloc.write_domain && target_obj->pending_write_domain &&
+ reloc.write_domain != target_obj->pending_write_domain) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset,
+ reloc.write_domain,
+ target_obj->pending_write_domain);
+ drm_gem_object_unreference(target_obj);
+ return -EINVAL;
+ }
+
+ #if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc.offset,
+ (int) reloc.target_handle,
+ (int) reloc.read_domains,
+ (int) reloc.write_domain,
+ (int) target_obj_priv->gtt_offset,
+ (int) reloc.presumed_offset,
+ reloc.delta);
+ #endif
+
+ target_obj->pending_read_domains |= reloc.read_domains;
+ target_obj->pending_write_domain |= reloc.write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+ drm_gem_object_unreference(target_obj);
+ continue;
+ }
+
+ /* Now that we're going to actually write some data in,
+ * make sure that any rendering using this buffer's contents
+ * is completed.
+ */
+ i915_gem_object_wait_rendering(obj);
+
+ /* As we're writing through the gtt, flush
+ * any CPU writes before we write the relocations
+ */
+ if (obj->write_domain & DRM_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ obj->write_domain = 0;
+ }
+
+ /* Map the page containing the relocation we're going to
+ * perform.
+ */
+ reloc_offset = obj_priv->gtt_offset + reloc.offset;
+ if (reloc_page == NULL ||
+ (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
+ (reloc_offset & ~(PAGE_SIZE - 1))) {
+ if (reloc_page != NULL)
+ iounmap(reloc_page);
+
+ reloc_page = ioremap(dev->agp->base +
+ (reloc_offset & ~(PAGE_SIZE - 1)),
+ PAGE_SIZE);
+ last_reloc_offset = reloc_offset;
+ if (reloc_page == NULL) {
+ drm_gem_object_unreference(target_obj);
+ return -ENOMEM;
+ }
+ }
+
+ reloc_entry = (uint32_t *)((char *)reloc_page +
+ (reloc_offset & (PAGE_SIZE - 1)));
+ reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+
+ #if WATCH_BUF
+ DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
+ obj, (unsigned int) reloc.offset,
+ readl(reloc_entry), reloc_val);
+ #endif
+ writel(reloc_val, reloc_entry);
+
+ /* Write the updated presumed offset for this entry back out
+ * to the user.
+ */
+ reloc.presumed_offset = target_obj_priv->gtt_offset;
+ ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
+ if (ret != 0) {
+ drm_gem_object_unreference(target_obj);
+ return ret;
+ }
+
+ drm_gem_object_unreference(target_obj);
+ }
+
+ if (reloc_page != NULL)
+ iounmap(reloc_page);
+
+ #if WATCH_BUF
+ if (0)
+ i915_gem_dump_object(obj, 128, __func__, ~0);
+ #endif
+ return 0;
+ }
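On the userspace side, each relocation names a target handle plus the dword to patch; the kernel writes target_gtt_offset + delta there and reports the presumed offset back through the same struct. A hedged sketch of filling one entry (field names per the kernel code above; the concrete values are illustrative):

struct drm_i915_gem_relocation_entry reloc;

reloc.target_handle = target_handle;	/* GEM handle of the pointee */
reloc.offset = 7 * 4;			/* byte offset in this object to patch */
reloc.delta = 256;			/* added to the target's GTT offset */
reloc.read_domains = DRM_GEM_DOMAIN_I915_RENDER;
reloc.write_domain = 0;			/* read-only reference */
reloc.presumed_offset = 0;		/* kernel writes the real offset back */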
+
+ /** Dispatch a batchbuffer to the ring
+ */
+ static int
+ i915_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct drm_i915_gem_execbuffer *exec,
+ uint64_t exec_offset)
+ {
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
+ (uintptr_t) exec->cliprects_ptr;
+ int nbox = exec->num_cliprects;
+ int i = 0, count;
+ uint32_t exec_start, exec_len;
+ RING_LOCALS;
+
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ exec_len = (uint32_t) exec->batch_len;
+
+ if ((exec_start | exec_len) & 0x7) {
+ DRM_ERROR("alignment\n");
+ return -EINVAL;
+ }
+
+ if (!exec_start)
+ return -EINVAL;
+
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ int ret = i915_emit_box(dev, boxes, i,
+ exec->DR1, exec->DR4);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_BATCH_BUFFER);
+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+ OUT_RING(exec_start + exec_len - 4);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(2);
+ if (IS_I965G(dev)) {
+ OUT_RING(MI_BATCH_BUFFER_START |
+ (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ OUT_RING(exec_start);
+ } else {
+ OUT_RING(MI_BATCH_BUFFER_START |
+ (2 << 6));
+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+ }
+ ADVANCE_LP_RING();
+ }
+ }
+
+ /* XXX breadcrumb */
+ return 0;
+ }
+
+ /* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+ static int
+ i915_gem_ring_throttle(struct drm_device *dev)
+ {
- drm_client_lock_release(dev);
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ while (!list_empty(&dev_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&dev_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ /* Break out if we're close enough. */
+ if ((long) (jiffies - request->emitted_jiffies) <= (20 * HZ) / 1000) {
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+ }
+
+ /* Wait on the last request if not. */
+ ret = i915_wait_request(dev, request->seqno);
+ if (ret != 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ int
+ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_gem_object **object_list = NULL;
+ struct drm_gem_object *batch_obj;
+ int ret, i;
+ uint64_t exec_offset;
+ uint32_t seqno, flush_domains;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ #if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+ #endif
+ i915_kernel_lost_context(dev);
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
+ DRM_MEM_DRIVER);
+ object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
+ DRM_MEM_DRIVER);
+ if (exec_list == NULL || object_list == NULL) {
+ DRM_ERROR("Failed to allocate exec or object list "
+ "for %d buffers\n",
+ args->buffer_count);
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ goto pre_mutex_err;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* Zero the global flush/invalidate flags. These
+ * will be modified as each object is bound to the
+ * GTT.
+ */
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+
+ /* Look up object handles and perform the relocations */
+ for (i = 0; i < args->buffer_count; i++) {
+ object_list[i] = drm_gem_object_lookup(dev, file_priv,
+ exec_list[i].handle);
+ if (object_list[i] == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec_list[i].handle, i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ object_list[i]->pending_read_domains = 0;
+ object_list[i]->pending_write_domain = 0;
+ ret = i915_gem_object_bind_and_relocate(object_list[i],
+ file_priv,
+ &exec_list[i]);
+ if (ret) {
+ DRM_ERROR("object bind and relocate failed %d\n", ret);
+ goto err;
+ }
+ }
+
+ /* Set the pending read domains for the batch buffer to COMMAND */
+ batch_obj = object_list[args->buffer_count-1];
+ batch_obj->pending_read_domains = DRM_GEM_DOMAIN_I915_COMMAND;
+ batch_obj->pending_write_domain = 0;
+
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ if (obj_priv->gtt_space == NULL) {
+ /* We evicted the buffer in the process of validating
+ * our set of buffers. We could try to recover by
+ * kicking everything out and trying again from
+ * the start.
+ */
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* make sure all previous memory operations have passed */
+ ret = i915_gem_object_set_domain(obj,
+ obj->pending_read_domains,
+ obj->pending_write_domain);
+ if (ret)
+ goto err;
+ }
+
+ /* Flush/invalidate caches and chipset buffer */
+ flush_domains = i915_gem_dev_set_domain(dev);
+
+ #if WATCH_COHERENCY
+ for (i = 0; i < args->buffer_count; i++) {
+ i915_gem_object_check_coherency(object_list[i],
+ exec_list[i].handle);
+ }
+ #endif
+
+ exec_offset = exec_list[args->buffer_count - 1].offset;
+
+ #if WATCH_EXEC
+ i915_gem_dump_object(object_list[args->buffer_count - 1],
+ args->batch_len,
+ __func__,
+ ~0);
+ #endif
+
+ /* Exec the batchbuffer */
+ ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+ if (ret) {
+ DRM_ERROR("dispatch failed %d\n", ret);
+ goto err;
+ }
+
+ /*
+ * Ensure that the commands in the batch buffer are
+ * finished before the interrupt fires
+ */
+ flush_domains |= i915_retire_commands(dev);
+
+ /*
+ * Get a seqno representing the execution of the current buffer,
+ * which we can wait on. We would like to mitigate these interrupts,
+ * likely by only creating seqnos occasionally (so that we have
+ * *some* interrupts representing completion of buffers that we can
+ * wait on when trying to clear up gtt space).
+ */
+ seqno = i915_add_request(dev, flush_domains);
+ BUG_ON(seqno == 0);
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ i915_gem_object_move_to_active(obj);
+ obj_priv->last_rendering_seqno = seqno;
+ #if WATCH_LRU
+ DRM_INFO("%s: move to exec list %p\n", __func__, obj);
+ #endif
+ }
+ #if WATCH_LRU
+ i915_dump_lru(dev, __func__);
+ #endif
+
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret)
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ err:
+ if (object_list != NULL) {
+ for (i = 0; i < args->buffer_count; i++)
+ drm_gem_object_unreference(object_list[i]);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ pre_mutex_err:
+ drm_free(object_list, sizeof(*object_list) * args->buffer_count,
+ DRM_MEM_DRIVER);
+ drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
+ DRM_MEM_DRIVER);
+
+ return ret;
+ }
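+ /*
+ * Hypothetical userspace sketch of driving this ioctl (the
+ * DRM_IOCTL_I915_GEM_EXECBUFFER request number and the
+ * drm_i915_gem_exec_object layout are assumed from the uapi side of
+ * this series). The batch must be the last entry in the list and
+ * batch_len 8-byte aligned, per the dispatch check above:
+ *
+ * struct drm_i915_gem_exec_object obj = { .handle = batch_handle };
+ * struct drm_i915_gem_execbuffer execbuf = {
+ * .buffers_ptr = (uint64_t)(unsigned long)&obj,
+ * .buffer_count = 1,
+ * .batch_start_offset = 0,
+ * .batch_len = batch_len,
+ * .num_cliprects = 0,
+ * };
+ *
+ * ret = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
+ */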
+
+ int
+ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_i915_gem_pin *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ i915_kernel_lost_context(dev);
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
+ args->handle);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ obj_priv = obj->driver_private;
+ if (obj_priv->gtt_space == NULL) {
+ ret = i915_gem_object_bind_to_gtt(obj,
+ (unsigned) args->alignment);
+ if (ret != 0) {
+ DRM_ERROR("Failure to bind in "
+ "i915_gem_pin_ioctl(): %d\n",
+ ret);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+
+ obj_priv->pin_count++;
+ args->offset = obj_priv->gtt_offset;
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
+
+ int
+ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_i915_gem_pin *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ mutex_lock(&dev->struct_mutex);
+
+ i915_kernel_lost_context(dev);
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
+ args->handle);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ obj_priv = obj->driver_private;
+ obj_priv->pin_count--;
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+ }
+
+ int
+ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_i915_gem_busy *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
+ args->handle);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ obj_priv = obj->driver_private;
+ args->busy = obj_priv->active;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+ }
+
+ int
+ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ return i915_gem_ring_throttle(dev);
+ }
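+ /*
+ * Hypothetical userspace sketch (DRM_IOCTL_I915_GEM_THROTTLE is an
+ * assumed request number): calling this once per frame keeps the
+ * client roughly 20 msec behind the GPU without busy-waiting:
+ *
+ * ioctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
+ */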
+
+ int i915_gem_init_object(struct drm_gem_object *obj)
+ {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+ if (obj_priv == NULL)
+ return -ENOMEM;
+
+ obj->driver_private = obj_priv;
+ obj_priv->obj = obj;
+ INIT_LIST_HEAD(&obj_priv->list);
+ return 0;
+ }
+
+ void i915_gem_free_object(struct drm_gem_object *obj)
+ {
+ i915_kernel_lost_context(obj->dev);
+ i915_gem_object_unbind(obj);
+
+ drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
+ }
+
+ int
+ i915_gem_set_domain(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain)
+ {
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ drm_client_lock_take(dev, file_priv);
+ i915_kernel_lost_context(dev);
+ ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
+ if (ret) {
- drm_client_lock_release(dev);
++ drm_client_lock_release(dev, file_priv);
+ return ret;
+ }
+ i915_gem_dev_set_domain(obj->dev);
- drm_client_lock_release(dev);
++ drm_client_lock_release(dev, file_priv);
+ return 0;
+ }
+
+ int
+ i915_gem_flush_pwrite(struct drm_gem_object *obj,
+ uint64_t offset, uint64_t size)
+ {
+ #if 0
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ /*
+ * For writes much less than the size of the object and
+ * which are already pinned in memory, do the flush right now
+ */
+
+ if ((size < obj->size >> 1) && obj_priv->page_list != NULL) {
+ unsigned long first_page = offset / PAGE_SIZE;
+ unsigned long beyond_page = roundup(offset + size, PAGE_SIZE) / PAGE_SIZE;
+
+ drm_ttm_cache_flush(obj_priv->page_list + first_page,
+ beyond_page - first_page);
+ drm_agp_chipset_flush(dev);
+ obj->write_domain = 0;
+ }
+ #endif
+ return 0;
+ }
+
+ void
+ i915_gem_lastclose(struct drm_device *dev)
+ {
++ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* Assume that the chip has been idled at this point. Just pull them
+ * off the execution list and unref them. Since this is the last
+ * close, this is also the last ref and they'll go away.
+ */
+
+ while (!list_empty(&dev_priv->mm.active_list)) {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list);
+
+ list_del_init(&obj_priv->list);
+ obj_priv->active = 0;
+ obj_priv->obj->write_domain = 0;
+ drm_gem_object_unreference(obj_priv->obj);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ }
uint64_t p_size;
};
-
+ struct drm_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+ };
+
+ struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ uint32_t handle;
+ uint32_t pad;
+ };
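+ /*
+ * Hypothetical userspace sketch (not part of this change); the
+ * DRM_IOCTL_GEM_CREATE / DRM_IOCTL_GEM_CLOSE request numbers are
+ * assumed, only the two structures above are from this header:
+ *
+ * struct drm_gem_create create = { .size = 4096 };
+ * struct drm_gem_close close_arg;
+ *
+ * if (ioctl(fd, DRM_IOCTL_GEM_CREATE, &create) == 0) {
+ * // create.size now holds the page-aligned size,
+ * // create.handle the (nonzero) object handle
+ * close_arg.handle = create.handle;
+ * close_arg.pad = 0;
+ * ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
+ * }
+ */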
+
+ struct drm_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+ };
+
+ struct drm_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+ };
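+ /*
+ * Hypothetical userspace sketch (DRM_IOCTL_GEM_PWRITE is an assumed
+ * request number): copy a buffer into the start of an object:
+ *
+ * struct drm_gem_pwrite arg = {
+ * .handle = handle,
+ * .offset = 0,
+ * .size = len,
+ * .data_ptr = (uint64_t)(unsigned long)buf,
+ * };
+ * ret = ioctl(fd, DRM_IOCTL_GEM_PWRITE, &arg);
+ */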
+
+ struct drm_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+ /** Returned address the data was mapped at */
+ uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
+ };
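+ /*
+ * Hypothetical userspace sketch (DRM_IOCTL_GEM_MMAP is an assumed
+ * request number; addr_ptr travels as a uint64_t because pointers
+ * are not 32/64 compatible):
+ *
+ * struct drm_gem_mmap arg = { .handle = handle, .size = size };
+ *
+ * if (ioctl(fd, DRM_IOCTL_GEM_MMAP, &arg) == 0)
+ * ptr = (void *)(unsigned long)arg.addr_ptr;
+ */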
+
+ struct drm_gem_flink {
+ /** Handle for the object being named */
+ uint32_t handle;
-
+ /** Returned global name */
+ uint32_t name;
+ };
+
+ struct drm_gem_open {
+ /** Name of object being opened */
+ uint32_t name;
-
+ /** Returned handle for the object */
- uint32_t handle;
-
++ uint32_t handle;
+ /** Returned size of the object */
+ uint64_t size;
+ };
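+ /*
+ * Hypothetical userspace sketch (assumed DRM_IOCTL_GEM_FLINK /
+ * DRM_IOCTL_GEM_OPEN request numbers): export an object as a global
+ * name in one client and reopen it in another:
+ *
+ * struct drm_gem_flink flink = { .handle = handle };
+ * ioctl(fd1, DRM_IOCTL_GEM_FLINK, &flink);
+ *
+ * struct drm_gem_open open_arg = { .name = flink.name };
+ * ioctl(fd2, DRM_IOCTL_GEM_OPEN, &open_arg);
+ * // open_arg.handle / open_arg.size now describe the object
+ */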
+
+ struct drm_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
-
+ /** New read domains */
+ uint32_t read_domains;
-
+ /** New write domain */
+ uint32_t write_domain;
+ };
-
+ #define DRM_GEM_DOMAIN_CPU 0x00000001
+/*
+ * DRM mode setting
+ */
+#define DRM_DISPLAY_INFO_LEN 32
+#define DRM_CONNECTOR_NAME_LEN 32
+#define DRM_DISPLAY_MODE_LEN 32
+#define DRM_PROP_NAME_LEN 32
+
+#define DRM_MODE_TYPE_BUILTIN (1<<0)
+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_PREFERRED (1<<3)
+#define DRM_MODE_TYPE_DEFAULT (1<<4)
+#define DRM_MODE_TYPE_USERDEF (1<<5)
+#define DRM_MODE_TYPE_DRIVER (1<<6)
+
+struct drm_mode_modeinfo {
+ unsigned int clock;
+ unsigned short hdisplay, hsync_start, hsync_end, htotal, hskew;
+ unsigned short vdisplay, vsync_start, vsync_end, vtotal, vscan;
+
+ unsigned int vrefresh; /* vertical refresh * 1000 */
+
+ unsigned int flags;
+ unsigned int type;
+ char name[DRM_DISPLAY_MODE_LEN];
+};
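+
+/* Illustrative helper (not part of this change): recover the stored
+ * vrefresh value (vertical refresh * 1000) from the raw timings. The
+ * clock field is in kHz, so the result is clock * 1000000 / (htotal *
+ * vtotal); e.g. a 148500 kHz clock over a 2200x1125 total raster gives
+ * 60000, i.e. 60 Hz. */
+static inline unsigned int
+drm_mode_vrefresh_x1000(const struct drm_mode_modeinfo *m)
+{
+ if (!m->htotal || !m->vtotal)
+ return 0;
+ return (unsigned int)(((uint64_t)m->clock * 1000000) /
+ ((uint32_t)m->htotal * m->vtotal));
+}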
+
+struct drm_mode_card_res {
+ uint64_t fb_id_ptr;
+ uint64_t crtc_id_ptr;
+ uint64_t connector_id_ptr;
+ uint64_t encoder_id_ptr;
+ int count_fbs;
+ int count_crtcs;
+ int count_connectors;
+ int count_encoders;
+ int min_width, max_width;
+ int min_height, max_height;
+ uint32_t generation;
+};
+
+struct drm_mode_crtc {
+ uint64_t set_connectors_ptr;
+
+ unsigned int crtc_id; /**< Id */
+ unsigned int fb_id; /**< Id of framebuffer */
+
+ int x, y; /**< Position on the framebuffer */
+
+ uint32_t generation;
+
+ int count_connectors;
+ unsigned int connectors; /**< Connectors that are connected */
+
+ int count_possibles;
+ unsigned int possibles; /**< Connectors that can be connected */
+ uint32_t gamma_size;
+ int mode_valid;
+ struct drm_mode_modeinfo mode;
+};
+
+struct drm_mode_get_encoder {
+
+ uint32_t generation;
+
+ uint32_t encoder_type;
+ uint32_t encoder_id;
+
+ unsigned int crtc; /**< Id of crtc */
+ uint32_t crtcs;
+ uint32_t clones;
+};
+
+#define DRM_MODE_ENCODER_NONE 0
+#define DRM_MODE_ENCODER_DAC 1
+#define DRM_MODE_ENCODER_TMDS 2
+#define DRM_MODE_ENCODER_LVDS 3
+#define DRM_MODE_ENCODER_TVDAC 4
+
+struct drm_mode_get_connector {
+
+ uint64_t encoders_ptr;
+ uint64_t modes_ptr;
+ uint64_t props_ptr;
+ uint64_t prop_values_ptr;
+
+ int count_modes;
+ int count_props;
+ int count_encoders;
+
+ unsigned int encoder; /**< Current Encoder */
+ unsigned int connector; /**< Id */
+ unsigned int connector_type;
+ unsigned int connector_type_id;
+
+ uint32_t generation;
+
+ unsigned int connection;
+ unsigned int mm_width, mm_height; /**< width x height in millimeters */
+ unsigned int subpixel;
+};
+
+#define DRM_MODE_PROP_PENDING (1<<0)
+#define DRM_MODE_PROP_RANGE (1<<1)
+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
+#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
+#define DRM_MODE_PROP_BLOB (1<<4)
+
+struct drm_mode_property_enum {
+ uint64_t value;
+ unsigned char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_mode_get_property {
+ uint64_t values_ptr; /* values and blob lengths */
+ uint64_t enum_blob_ptr; /* enum and blob id ptrs */
+
+ unsigned int prop_id;
+ unsigned int flags;
+ unsigned char name[DRM_PROP_NAME_LEN];
+
+ int count_values;
+ int count_enum_blobs;
+};
+
+struct drm_mode_connector_set_property {
+ uint64_t value;
+ unsigned int prop_id;
+ unsigned int connector_id;
+};
+
+struct drm_mode_get_blob {
+ uint32_t blob_id;
+ uint32_t length;
+ uint64_t data;
+};
+
+struct drm_mode_fb_cmd {
+ unsigned int buffer_id;
+ unsigned int width, height;
+ unsigned int pitch;
+ unsigned int bpp;
+ unsigned int handle;
+ unsigned int depth;
+};
+
+struct drm_mode_mode_cmd {
+ unsigned int connector_id;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_CURSOR_BO 0x01
+#define DRM_MODE_CURSOR_MOVE 0x02
+
+/*
+ * Depending on the value in flags, different members are used.
+ *
+ * CURSOR_BO uses
+ * crtc
+ * width
+ * height
+ * handle - if 0, turns the cursor off
+ *
+ * CURSOR_MOVE uses
+ * crtc
+ * x
+ * y
+ */
+struct drm_mode_cursor {
+ unsigned int flags;
+ unsigned int crtc;
+ int x;
+ int y;
+ uint32_t width;
+ uint32_t height;
+ unsigned int handle;
+};
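+
+/* Illustrative fill-in for the CURSOR_MOVE case above (the CURSOR_BO
+ * case is analogous with width/height/handle); the ioctl plumbing that
+ * consumes this struct is assumed elsewhere: */
+static inline struct drm_mode_cursor
+drm_mode_cursor_move_args(unsigned int crtc_id, int x, int y)
+{
+ struct drm_mode_cursor arg = { 0 };
+
+ arg.flags = DRM_MODE_CURSOR_MOVE; /* only crtc, x and y are used */
+ arg.crtc = crtc_id;
+ arg.x = x;
+ arg.y = y;
+ return arg;
+}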
+
+/*
+ * oh so ugly hotplug
+ */
+struct drm_mode_hotplug {
+ uint32_t counter;
+};
+
+struct drm_mode_crtc_lut {
+
+ uint32_t crtc_id;
+ uint32_t gamma_size;
+
+ uint64_t red;
+ uint64_t green;
+ uint64_t blue;
+};
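+
+/* Illustrative sketch (not part of this change): point the LUT at three
+ * userspace gamma ramps. Consistent with the other _ptr fields in this
+ * header, red/green/blue are assumed to carry user pointers to
+ * gamma_size 16-bit entries each. */
+static inline void
+drm_mode_lut_args(struct drm_mode_crtc_lut *lut, uint32_t crtc_id,
+ uint32_t size, uint16_t *r, uint16_t *g, uint16_t *b)
+{
+ lut->crtc_id = crtc_id;
+ lut->gamma_size = size;
+ lut->red = (uint64_t)(unsigned long)r;
+ lut->green = (uint64_t)(unsigned long)g;
+ lut->blue = (uint64_t)(unsigned long)b;
+}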
+
/**
* \name Ioctls Definitions
*/
*/
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
- u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
+ u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ u32 acthd_reg = IS_I965G(dev) ? I965REG_ACTHD : I915REG_ACTHD;
+ u32 last_acthd = I915_READ(acthd_reg);
+ u32 acthd;
int i;
- for (i = 0; i < 100000; i++) {
- ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+ for (i = 0; i < 10000; i++) {
+ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
return -EBUSY;
}
-int i915_init_hardware_status(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- /* Program Hardware Status Page */
- dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-
- if (!dev_priv->status_page_dmah) {
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-
- I915_WRITE(0x02080, dev_priv->dma_status_page);
- DRM_DEBUG("Enabled hardware status page\n");
- return 0;
-}
-
-void i915_free_hardware_status(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->status_page_dmah) {
- drm_pci_free(dev, dev_priv->status_page_dmah);
- dev_priv->status_page_dmah = NULL;
- /* Need to rewrite hardware status page */
- I915_WRITE(0x02080, 0x1ffff000);
- }
-
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
- I915_WRITE(0x02080, 0x1ffff000);
- }
-}
-
+ #if I915_RING_VALIDATE
+ /**
+ * Validate the cached ring tail value
+ *
+ * If the X server writes to the ring and DRM doesn't
+ * reload the head and tail pointers, it will end up writing
+ * data to the wrong place in the ring, causing havoc.
+ */
+ void i915_ring_validate(struct drm_device *dev, const char *func, int line)
+ {
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ u32 tail = I915_READ(LP_RING+RING_TAIL) & HEAD_ADDR;
+ u32 head = I915_READ(LP_RING+RING_HEAD) & HEAD_ADDR;
+
+ if (tail != ring->tail) {
+ DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
+ func, line,
+ ring->head, head, ring->tail, tail);
+ BUG_ON(1);
+ }
+ }
+ #endif
+
void i915_kernel_lost_context(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
- ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
+ /* we should never lose context on the ring with modesetting
+ * as we don't expose it to userspace */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
if (dev->irq)
drm_irq_uninstall(dev);
- if (dev_priv->ring.virtual_start) {
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- dev_priv->ring.virtual_start = 0;
- dev_priv->ring.map.handle = 0;
- dev_priv->ring.map.size = 0;
- }
-
- if (I915_NEED_GFX_HWS(dev))
- i915_free_hardware_status(dev);
+ if (dev_priv->ring.virtual_start) {
+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
+ dev_priv->ring.virtual_start = 0;
+ dev_priv->ring.map.handle = 0;
+ dev_priv->ring.map.size = 0;
+ dev_priv->ring.Size = 0;
+ }
+
+ if (dev_priv->status_page_dmah) {
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ dev_priv->status_page_dmah = NULL;
+ /* Need to rewrite hardware status page */
+ I915_WRITE(0x02080, 0x1ffff000);
+ }
+
+ if (dev_priv->status_gfx_addr) {
+ dev_priv->status_gfx_addr = 0;
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ I915_WRITE(0x02080, 0x1ffff000);
+ }
-
return 0;
}
struct drm_file *file_priv,
drm_i915_init_t * init)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
-#if defined(I915_HAVE_BUFFER)
- int ret;
-#endif
- dev_priv->sarea = drm_getsarea(dev);
- if (!dev_priv->sarea) {
- DRM_ERROR("can not find sarea!\n");
- i915_dma_cleanup(dev);
- return -EINVAL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (init->mmio_offset != 0)
+ dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
+ if (!dev_priv->mmio_map) {
+ i915_dma_cleanup(dev);
+ DRM_ERROR("can not find mmio map!\n");
+ return -EINVAL;
+ }
}
-
#ifdef I915_HAVE_BUFFER
- dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
-#endif
-
- if (init->sarea_priv_offset)
- dev_priv->sarea_priv = (drm_i915_sarea_t *)
- ((u8 *) dev_priv->sarea->handle +
- init->sarea_priv_offset);
- else {
- /* No sarea_priv for you! */
- dev_priv->sarea_priv = NULL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
}
+#endif
- dev_priv->ring.Start = init->ring_start;
- dev_priv->ring.End = init->ring_end;
- dev_priv->ring.Size = init->ring_size;
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
- dev_priv->ring.map.offset = init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
- i915_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
+ if (!dev_priv->ring.Size) {
+ dev_priv->ring.Start = init->ring_start;
+ dev_priv->ring.End = init->ring_end;
+ dev_priv->ring.Size = init->ring_size;
+ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+ dev_priv->ring.map.offset = init->ring_start;
+ dev_priv->ring.map.size = init->ring_size;
+ dev_priv->ring.map.type = 0;
+ dev_priv->ring.map.flags = 0;
+ dev_priv->ring.map.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->ring.map, dev);
+
+ if (dev_priv->ring.map.handle == NULL) {
+ i915_dma_cleanup(dev);
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+ dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
}
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
dev_priv->cpp = init->cpp;
-
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->pf_current_page = 0;
+ master_priv->sarea_priv->pf_current_page = 0;
/* We are using separate values as placeholders for mechanisms for
* private backbuffer/depthbuffer usage.
*/
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
+ /* Program Hardware Status Page */
+ if (!I915_NEED_GFX_HWS(dev)) {
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+ if (!dev_priv->status_page_dmah) {
+ i915_dma_cleanup(dev);
+ DRM_ERROR("Can not allocate hardware status page\n");
+ return -ENOMEM;
+ }
+ dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+ dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+ I915_WRITE(0x02080, dev_priv->dma_status_page);
+ }
+ DRM_DEBUG("Enabled hardware status page\n");
+
#ifdef I915_HAVE_BUFFER
- mutex_init(&dev_priv->cmdbuf_mutex);
-#endif
-#if defined(I915_HAVE_BUFFER)
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ mutex_init(&dev_priv->cmdbuf_mutex);
+ }
- #endif
- #if defined(I915_HAVE_BUFFER)
++
if (init->func == I915_INIT_DMA2) {
- ret = setup_dri2_sarea(dev, file_priv, init);
+ int ret = setup_dri2_sarea(dev, file_priv, init);
if (ret) {
i915_dma_cleanup(dev);
DRM_ERROR("could not set up dri2 sarea\n");
DRM_DEBUG("\n");
- if (!dev_priv->sarea) {
- DRM_ERROR("can not find sarea!\n");
- return -EINVAL;
- }
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
- if (!dev_priv->mmio_map) {
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
-
if (dev_priv->ring.map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return 0;
}
- static int i915_emit_box(struct drm_device * dev,
- struct drm_clip_rect __user * boxes,
- int i, int DR1, int DR4)
+ int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect __user * boxes,
+ int i, int DR1, int DR4)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_clip_rect box;
RING_LOCALS;
DRM_DEBUG("Breadcrumb counter wrapped around\n");
}
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->counter;
BEGIN_LP_RING(4);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(5 << STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(20);
++ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
int i915_quiescent(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
i915_kernel_lost_context(dev);
- return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+ ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+ if (ret)
+ {
+ i915_kernel_lost_context (dev);
+ DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
+ dev_priv->ring.head,
+ dev_priv->ring.tail,
+ dev_priv->ring.space);
+ }
+ return ret;
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
unsigned int plane;
unsigned int sequence;
int flip;
-} drm_i915_vbl_swap_t;
-
-typedef struct drm_i915_private {
- struct drm_device *dev;
+ struct drm_minor *minor;
+};
+struct drm_i915_master_private {
drm_local_map_t *sarea;
+ struct drm_i915_sarea *sarea_priv;
+};
+
+struct drm_i915_private {
++ struct drm_device *dev;
++
+ struct drm_buffer_object *ring_buffer;
+
drm_local_map_t *mmio_map;
- drm_i915_sarea_t *sarea_priv;
- drm_i915_ring_buffer_t ring;
+ unsigned long mmiobase;
+ unsigned long mmiolen;
+
+ struct drm_i915_ring_buffer ring;
- drm_dma_handle_t *status_page_dmah;
+ struct drm_dma_handle *status_page_dmah;
void *hw_status_page;
dma_addr_t dma_status_page;
uint32_t counter;
DRM_SPINTYPE user_irq_lock;
int user_irq_refcount;
int fence_irq_on;
- uint32_t irq_enable_reg;
+ uint32_t irq_mask_reg;
int irq_enabled;
+ struct workqueue_struct *wq;
+
+ bool cursor_needs_physical;
#ifdef I915_HAVE_FENCE
uint32_t flush_sequence;
/* DRI2 sarea */
struct drm_buffer_object *sarea_bo;
struct drm_bo_kmap_obj sarea_kmap;
+
+ /* Feature bits from the VBIOS */
+ int int_tv_support:1;
+ int lvds_dither:1;
+ int lvds_vbt:1;
+ int int_crt_support:1;
#endif
+
++ struct {
++ struct drm_memrange gtt_space;
++
++ /**
++ * List of objects currently involved in rendering from the
++ * ringbuffer.
++ *
++ * A reference is held on the buffer while on this list.
++ */
++ struct list_head active_list;
++
++ /**
++ * List of objects which are not in the ringbuffer but which
++ * still have a write_domain which needs to be flushed before
++ * unbinding.
++ *
++ * A reference is held on the buffer while on this list.
++ */
++ struct list_head flushing_list;
++
++ /**
++ * LRU list of objects which are not in the ringbuffer and
++ * are ready to unbind, but are still in the GTT.
++ *
++ * A reference is not held on the buffer while on this list,
++ * as merely being GTT-bound shouldn't prevent its being
++ * freed, and we'll pull it off the list in the free path.
++ */
++ struct list_head inactive_list;
++
++ /**
++ * List of breadcrumbs associated with GPU requests currently
++ * outstanding.
++ */
++ struct list_head request_list;
++
++ /**
++ * We leave the user IRQ off as much as possible,
++ * but this means that requests will finish and never
++ * be retired once the system goes idle. Set a timer to
++ * fire periodically while the ring is running. When it
++ * fires, go retire requests.
++ */
++ struct timer_list retire_timer;
++ struct work_struct retire_task;
++
++ uint32_t next_gem_seqno;
++ } mm;
++
++ struct work_struct user_interrupt_task;
++
/* Register state */
u8 saveLBB;
u32 saveDSPACNTR;
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_emit_irq(struct drm_device * dev);
+extern void i915_enable_interrupt (struct drm_device *dev);
+ extern int i915_wait_irq(struct drm_device * dev, int irq_nr);
extern int i915_enable_vblank(struct drm_device *dev, int crtc);
extern void i915_disable_vblank(struct drm_device *dev, int crtc);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void i915_user_irq_on(drm_i915_private_t *dev_priv);
-extern void i915_user_irq_off(drm_i915_private_t *dev_priv);
+extern void i915_user_irq_on(struct drm_device *dev);
+extern void i915_user_irq_off(struct drm_device *dev);
+ extern void i915_user_interrupt_handler(struct work_struct *work);
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
/* i915_execbuf.c */
int i915_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-
+ /* i915_gem.c */
+ int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ int i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ int i915_gem_init_object(struct drm_gem_object *obj);
+ void i915_gem_free_object(struct drm_gem_object *obj);
+ int i915_gem_set_domain(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain);
+ int i915_gem_flush_pwrite(struct drm_gem_object *obj,
+ uint64_t offset, uint64_t size);
+ void i915_gem_lastclose(struct drm_device *dev);
+ void i915_gem_retire_requests(struct drm_device *dev);
+ void i915_gem_retire_timeout(unsigned long data);
+ void i915_gem_retire_handler(struct work_struct *work);
#endif
+extern unsigned int i915_fbpercrtc;
+
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
extern void intel_init_chipset_flush_compat(struct drm_device *dev);
#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
#define I915_VERBOSE 0
+ #define I915_RING_VALIDATE 0
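+ /* A plausible companion macro (its exact form is assumed, not shown
+ * in this hunk): ADVANCE_LP_RING() below calls I915_RING_DO_VALIDATE(),
+ * which must compile away unless I915_RING_VALIDATE is enabled. */
+ #if I915_RING_VALIDATE
+ void i915_ring_validate(struct drm_device *dev, const char *func, int line);
+ #define I915_RING_DO_VALIDATE(dev) \
+ i915_ring_validate(dev, __FUNCTION__, __LINE__)
+ #else
+ #define I915_RING_DO_VALIDATE(dev)
+ #endif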
+#define PRIMARY_RINGBUFFER_SIZE (128*1024)
+
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
volatile char *virt;
#define ADVANCE_LP_RING() do { \
if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
+ I915_RING_DO_VALIDATE(dev); \
dev_priv->ring.tail = outring; \
dev_priv->ring.space -= outcount * 4; \
- I915_WRITE(LP_RING + RING_TAIL, outring); \
+ I915_WRITE(PRB0_TAIL, outring); \
} while(0)
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-/* Extended config space */
-#define LBB 0xf4
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ */
+#define INTEL_GMCH_CTRL 0x52
+#define INTEL_GMCH_ENABLED 0x4
+#define INTEL_GMCH_MEM_MASK 0x1
+#define INTEL_GMCH_MEM_64M 0x1
+#define INTEL_GMCH_MEM_128M 0
+
+#define INTEL_855_GMCH_GMS_MASK (0x7 << 4)
+#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+
+#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
+
+/* PCI config space */
+
+#define HPLLCC 0xc0 /* 855 only */
+#define GC_CLOCK_CONTROL_MASK (3 << 0)
+#define GC_CLOCK_133_200 (0 << 0)
+#define GC_CLOCK_100_200 (1 << 0)
+#define GC_CLOCK_100_133 (2 << 0)
+#define GC_CLOCK_166_250 (3 << 0)
+#define GCFGC 0xf0 /* 915+ only */
+#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
+#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define GC_DISPLAY_CLOCK_MASK (7 << 4)
+#define LBB 0xf4
/* VGA stuff */
#define VGA_CR_INDEX_CGA 0x3d4
#define VGA_CR_DATA_CGA 0x3d5
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
-#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
-#define CMD_REPORT_HEAD (7<<23)
-#define CMD_STORE_DWORD_IMM ((0x20<<23) | (0x1 << 22) | 0x1)
+/*
+ * Memory interface instructions used by the kernel
+ */
+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+
+#define MI_NOOP MI_INSTR(0, 0)
+#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
+#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define MI_FLUSH MI_INSTR(0x04, 0)
+#define MI_READ_FLUSH (1 << 0)
+#define MI_EXE_FLUSH (1 << 1)
+#define MI_NO_WRITE_FLUSH (1 << 2)
+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
+#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
+#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) /* used to have 1<<22? */
+#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
++#define MI_STORE_DWORD_INDEX_SHIFT 2
+#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
+#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
+#define MI_BATCH_NON_SECURE (1)
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
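+/* Worked expansions of MI_INSTR() above (illustrative only):
+ * MI_BATCH_BUFFER = MI_INSTR(0x30, 1) = (0x30 << 23) | 1 = 0x18000001
+ * MI_BATCH_BUFFER_START = MI_INSTR(0x31, 0) = (0x31 << 23) = 0x18800000
+ */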
+
++#define BREADCRUMB_BITS 31
++#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
++
++#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
++
+ /**
- * Stores a 32-bit integer to the status page at the dword index given.
++ * Reads a dword out of the status page, which is written to from the command
++ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
++ * MI_STORE_DATA_IMM.
++ *
++ * The following dwords have a reserved meaning:
++ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
++ * 4: ring 0 head pointer
++ * 5: ring 1 head pointer (915-class)
++ * 6: ring 2 head pointer (915-class)
++ *
++ * The area from dword 0x10 to 0x3ff is available for driver usage.
+ */
-#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
-# define STORE_DWORD_INDEX_SHIFT 2
++#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
++#define I915_GEM_HWS_INDEX 0x10
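++/* Illustrative use (not part of this change): the GEM breadcrumb
++ * written via MI_STORE_DWORD_INDEX at I915_GEM_HWS_INDEX would be
++ * read back as:
++ *
++ * u32 seqno = READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
++ */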
+
-#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
+/*
+ * 3D instructions used by the kernel
+ */
+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
-#define CMD_MI_FLUSH (0x04 << 23)
-#define MI_NO_WRITE_FLUSH (1 << 2)
-#define MI_READ_FLUSH (1 << 0)
-#define MI_EXE_FLUSH (1 << 1)
-#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
-#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
++#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR (0x1<<1)
+#define SC_ENABLE_MASK (0x1<<0)
+#define SC_ENABLE (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK (0xffff<<16)
+#define SCI_XMIN_MASK (0xffff<<0)
+#define SCI_YMAX_MASK (0xffff<<16)
+#define SCI_XMAX_MASK (0xffff<<0)
+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
+#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+#define BLT_DEPTH_8 (0<<24)
+#define BLT_DEPTH_16_565 (1<<24)
+#define BLT_DEPTH_16_1555 (2<<24)
+#define BLT_DEPTH_32 (3<<24)
+#define BLT_ROP_GXCOPY (0xcc<<16)
+#define XY_SRC_COPY_BLT_SRC_TILED (1<<15)
+#define XY_SRC_COPY_BLT_DST_TILED (1<<11)
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define ASYNC_FLIP (1<<22)
+#define DISPLAY_PLANE_A (0<<20)
+#define DISPLAY_PLANE_B (1<<20)
-/* Packet to load a register value from the ring/batch command stream:
+/*
+ * Instruction and interrupt control regs
*/
-#define CMD_MI_LOAD_REGISTER_IMM ((0x22 << 23)|0x1)
-#define BB1_START_ADDR_MASK (~0x7)
-#define BB1_PROTECTED (1<<0)
-#define BB1_UNPROTECTED (0<<0)
-#define BB2_END_ADDR_MASK (~0x7)
+#define PRB0_TAIL 0x02030
+#define PRB0_HEAD 0x02034
+#define PRB0_START 0x02038
+#define PRB0_CTL 0x0203c
+#define TAIL_ADDR 0x001FFFF8
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_NR_PAGES 0x001FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
+#define PRB1_TAIL 0x02040 /* 915+ only */
+#define PRB1_HEAD 0x02044 /* 915+ only */
+#define PRB1_START 0x02048 /* 915+ only */
+#define PRB1_CTL 0x0204c /* 915+ only */
++#define I965REG_ACTHD 0x02074
+#define HWS_PGA 0x02080
+#define IPEIR 0x02088
+#define NOPID 0x02094
+#define HWSTAM 0x02098
+#define SCPD0 0x0209c /* 915+ only */
+#define IER 0x020a0
+#define IIR 0x020a4
+#define IMR 0x020a8
+#define ISR 0x020ac
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
+#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
+#define I915_HWB_OOM_INTERRUPT (1<<13)
+#define I915_SYNC_STATUS_INTERRUPT (1<<12)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
+#define I915_DEBUG_INTERRUPT (1<<2)
+#define I915_USER_INTERRUPT (1<<1)
+#define EIR 0x020b0
+#define EMR 0x020b4
+#define ESR 0x020b8
+#define INSTPM 0x020c0
++#define I915REG_ACTHD 0x020C8
+#define FW_BLC 0x020d8
+#define FW_BLC_SELF 0x020e0 /* 915+ only */
+#define MI_ARB_STATE 0x020e4 /* 915+ only */
+#define CACHE_MODE_0 0x02120 /* 915+ only */
+#define CM0_MASK_SHIFT 16
+#define CM0_IZ_OPT_DISABLE (1<<6)
+#define CM0_ZR_OPT_DISABLE (1<<5)
+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define CM0_COLOR_EVICT_DISABLE (1<<3)
+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+
+/*
+ * Framebuffer compression (915+ only)
+ */
-/* Framebuffer compression */
#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
#define FBC_LL_BASE 0x03204 /* 4k page aligned */
#define FBC_CONTROL 0x03208
- drm_mm_print(&dev->bm.man[DRM_BO_MEM_VRAM].manager, "VRAM");
- drm_mm_print(&dev->bm.man[DRM_BO_MEM_TT].manager, "TT");
-
--- /dev/null
+/*
+ * Copyright (c) 2007 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
+ * 2004 Sylvain Meyer
+ *
+ * GPL/BSD dual license
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_bios.h"
+#include "intel_drv.h"
+
+/**
+ * i915_probe_agp - get AGP bootup configuration
+ * @pdev: PCI device
+ * @aperture_size: returns AGP aperture configured size
+ * @preallocated_size: returns size of BIOS preallocated AGP space
+ *
+ * Since Intel integrated graphics are UMA, the BIOS has to set aside
+ * some RAM for the framebuffer at early boot. This code figures out
+ * how much was set aside so we can use it for our own purposes.
+ */
+int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
+ unsigned long *preallocated_size)
+{
+ struct pci_dev *bridge_dev;
+ u16 tmp = 0;
+ unsigned long overhead;
+
+ bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+ if (!bridge_dev) {
+ DRM_ERROR("bridge device not found\n");
+ return -1;
+ }
+
+ /* Get the fb aperture size and "stolen" memory amount. */
+ pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
+ pci_dev_put(bridge_dev);
+
+ *aperture_size = 1024 * 1024;
+ *preallocated_size = 1024 * 1024;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_82830_CGC:
+ case PCI_DEVICE_ID_INTEL_82845G_IG:
+ case PCI_DEVICE_ID_INTEL_82855GM_IG:
+ case PCI_DEVICE_ID_INTEL_82865_IG:
+ if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
+ *aperture_size *= 64;
+ else
+ *aperture_size *= 128;
+ break;
+ default:
+ /* 9xx supports large sizes, just look at the length */
+ *aperture_size = pci_resource_len(pdev, 2);
+ break;
+ }
+
+ /*
+ * Some of the preallocated space is taken by the GTT
+ * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
+ */
+ overhead = (*aperture_size / 1024) + 4096;
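+ /*
+ * Worked example (illustrative, not part of this change): a 128MB
+ * aperture yields 134217728 / 1024 = 131072 bytes (128KB) of GTT
+ * plus a 4096-byte popup, so overhead = 135168 bytes.
+ */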
+ switch (tmp & INTEL_855_GMCH_GMS_MASK) {
+ case INTEL_855_GMCH_GMS_STOLEN_1M:
+ break; /* 1M already */
+ case INTEL_855_GMCH_GMS_STOLEN_4M:
+ *preallocated_size *= 4;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_8M:
+ *preallocated_size *= 8;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_16M:
+ *preallocated_size *= 16;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_32M:
+ *preallocated_size *= 32;
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_48M:
+ *preallocated_size *= 48;
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_64M:
+ *preallocated_size *= 64;
+ break;
+ case INTEL_855_GMCH_GMS_DISABLED:
+ DRM_ERROR("video memory is disabled\n");
+ return -1;
+ default:
+ DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+ tmp & INTEL_855_GMCH_GMS_MASK);
+ return -1;
+ }
+ *preallocated_size -= overhead;
+
+ return 0;
+}
+
+int i915_load_modeset_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long agp_size, prealloc_size;
+ int size, ret = 0;
+
+ i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
+ printk("setting up %ld bytes of VRAM space\n", prealloc_size);
+ printk("setting up %ld bytes of TT space\n", (agp_size - prealloc_size));
+
+ drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, prealloc_size >> PAGE_SHIFT, 1);
+ drm_bo_init_mm(dev, DRM_BO_MEM_TT, prealloc_size >> PAGE_SHIFT,
+ (agp_size - prealloc_size) >> PAGE_SHIFT, 1);
+ I915_WRITE(PRB0_CTL, 0);
+ I915_WRITE(PRB0_HEAD, 0);
+ I915_WRITE(PRB0_TAIL, 0);
+
+ size = PRIMARY_RINGBUFFER_SIZE;
+ ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
+ DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_MEM_VRAM |
+ DRM_BO_FLAG_NO_EVICT,
+ DRM_BO_HINT_DONT_FENCE, 0x1, 0,
+ &dev_priv->ring_buffer);
+ if (ret < 0) {
+ DRM_ERROR("Unable to allocate or pin ring buffer\n");
+ goto clean_mm;
+ }
+
+ /* remap the buffer object properly */
+ dev_priv->ring.Start = dev_priv->ring_buffer->offset;
+ dev_priv->ring.End = dev_priv->ring.Start + size;
+ dev_priv->ring.Size = size;
+ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+ /* FIXME: need wrapper with PCI mem checks */
+ ret = drm_mem_reg_ioremap(dev, &dev_priv->ring_buffer->mem,
+ (void **) &dev_priv->ring.virtual_start);
+ if (ret) {
+ DRM_ERROR("error mapping ring buffer: %d\n", ret);
+ goto destroy_ringbuffer;
+ }
+
+ DRM_DEBUG("ring start %08lX, %p, %08lX\n", dev_priv->ring.Start,
+ dev_priv->ring.virtual_start, dev_priv->ring.Size);
+
+ memset((void *)(dev_priv->ring.virtual_start), 0, dev_priv->ring.Size);
+ I915_WRITE(PRB0_START, dev_priv->ring.Start);
+ I915_WRITE(PRB0_CTL, ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) |
+ (RING_NO_REPORT | RING_VALID));
+
+ /* Allow hardware batchbuffers unless told otherwise.
+ */
+ dev_priv->allow_batchbuffer = 1;
+ dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
+ mutex_init(&dev_priv->cmdbuf_mutex);
+
+ /* Program Hardware Status Page */
+ if (!IS_G33(dev)) {
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+ if (!dev_priv->status_page_dmah) {
+ DRM_ERROR("Can not allocate hardware status page\n");
+ ret = -ENOMEM;
+ goto destroy_ringbuffer;
+ }
+ dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+ dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+ } else {
+ size = 4 * 1024;
+ ret = drm_buffer_object_create(dev, size,
+ drm_bo_type_kernel,
+ DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_MEM_VRAM |
+ DRM_BO_FLAG_NO_EVICT,
+ DRM_BO_HINT_DONT_FENCE, 0x1, 0,
+ &dev_priv->hws_bo);
+ if (ret < 0) {
+ DRM_ERROR("Unable to allocate or pin hw status page\n");
+ ret = -EINVAL;
+ goto destroy_ringbuffer;
+ }
+
+ dev_priv->status_gfx_addr =
+ dev_priv->hws_bo->offset & (0x1ffff << 12);
+ dev_priv->hws_map.offset = dev->agp->base +
+ dev_priv->hws_bo->offset;
+ dev_priv->hws_map.size = size;
+ dev_priv->hws_map.type = 0;
+ dev_priv->hws_map.flags = 0;
+ dev_priv->hws_map.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->hws_map, dev);
+ if (dev_priv->hws_map.handle == NULL) {
+ dev_priv->status_gfx_addr = 0;
+ DRM_ERROR("can not ioremap virtual addr for"
+ "G33 hw status page\n");
+ ret = -ENOMEM;
+ goto destroy_hws;
+ }
+ dev_priv->hw_status_page = dev_priv->hws_map.handle;
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ }
+ DRM_DEBUG("Enabled hardware status page\n");
+
+ dev_priv->wq = create_singlethread_workqueue("i915");
+ if (dev_priv->wq == 0) {
+ DRM_DEBUG("Error\n");
+ ret = -EINVAL;
+ goto destroy_hws;
+ }
+
+ ret = intel_init_bios(dev);
+ if (ret) {
+ DRM_ERROR("failed to find VBIOS tables\n");
+ ret = -ENODEV;
+ goto destroy_wq;
+ }
+
+ intel_modeset_init(dev);
+ drm_helper_initial_config(dev, false);
+
- // dev_priv->flags = flags;
+ dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
+ if (!dev->devname) {
+ ret = -ENOMEM;
+ goto modeset_cleanup;
+ }
+
+ ret = drm_irq_install(dev);
+ if (ret) {
+ kfree(dev->devname);
+ goto modeset_cleanup;
+ }
+ return 0;
+
+modeset_cleanup:
+ intel_modeset_cleanup(dev);
+destroy_wq:
+ destroy_workqueue(dev_priv->wq);
+destroy_hws:
+ if (!IS_G33(dev)) {
+ if (dev_priv->status_page_dmah)
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ } else {
+ if (dev_priv->hws_map.handle)
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ if (dev_priv->hws_bo)
+ drm_bo_usage_deref_unlocked(&dev_priv->hws_bo);
+ }
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+destroy_ringbuffer:
+ if (dev_priv->ring.virtual_start)
+ drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
+ dev_priv->ring.virtual_start);
+ if (dev_priv->ring_buffer)
+ drm_bo_usage_deref_unlocked(&dev_priv->ring_buffer);
+clean_mm:
+ drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1);
+ drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1);
+ return ret;
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - drive output discovery via intel_modeset_init()
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct drm_i915_private *dev_priv;
+ int ret = 0;
+
+ dev_priv = drm_alloc(sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ memset(dev_priv, 0, sizeof(struct drm_i915_private));
+ dev->dev_private = (void *)dev_priv;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
++ dev_priv->dev = dev;
+
+ /* i915 has 4 more counters */
+ dev->counters += 4;
+ dev->types[6] = _DRM_STAT_IRQ;
+ dev->types[7] = _DRM_STAT_PRIMARY;
+ dev->types[8] = _DRM_STAT_SECONDARY;
+ dev->types[9] = _DRM_STAT_DMA;
+
+ if (IS_MOBILE(dev) || IS_I9XX(dev))
+ dev_priv->cursor_needs_physical = true;
+ else
+ dev_priv->cursor_needs_physical = false;
+
+ if (IS_I965G(dev) || IS_G33(dev))
+ dev_priv->cursor_needs_physical = false;
+
+ if (IS_I9XX(dev)) {
+ pci_read_config_dword(dev->pdev, 0x5C, &dev_priv->stolen_base);
+ DRM_DEBUG("stolen base %p\n", (void*)dev_priv->stolen_base);
+ }
+
+ if (IS_I9XX(dev)) {
+ dev_priv->mmiobase = drm_get_resource_start(dev, 0);
+ dev_priv->mmiolen = drm_get_resource_len(dev, 0);
+ dev->mode_config.fb_base =
+ drm_get_resource_start(dev, 2) & 0xff000000;
+ } else if (drm_get_resource_start(dev, 1)) {
+ dev_priv->mmiobase = drm_get_resource_start(dev, 1);
+ dev_priv->mmiolen = drm_get_resource_len(dev, 1);
+ dev->mode_config.fb_base =
+ drm_get_resource_start(dev, 0) & 0xff000000;
+ } else {
+ DRM_ERROR("Unable to find MMIO registers\n");
+ ret = -ENODEV;
+ goto free_priv;
+ }
+
+ DRM_DEBUG("fb_base: 0x%08lx\n", dev->mode_config.fb_base);
+
+ ret = drm_addmap(dev, dev_priv->mmiobase, dev_priv->mmiolen,
+ _DRM_REGISTERS, _DRM_KERNEL|_DRM_READ_ONLY|_DRM_DRIVER,
+ &dev_priv->mmio_map);
+ if (ret != 0) {
+ DRM_ERROR("Cannot add mapping for MMIO registers\n");
+ goto free_priv;
+ }
+
++ INIT_LIST_HEAD(&dev_priv->mm.active_list);
++ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
++ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
++ INIT_LIST_HEAD(&dev_priv->mm.request_list);
++ dev_priv->mm.retire_timer.function = i915_gem_retire_timeout;
++ dev_priv->mm.retire_timer.data = (unsigned long) dev;
++ init_timer_deferrable (&dev_priv->mm.retire_timer);
++ INIT_WORK(&dev_priv->mm.retire_task,
++ i915_gem_retire_handler);
++ INIT_WORK(&dev_priv->user_interrupt_task,
++ i915_user_interrupt_handler);
++ dev_priv->mm.next_gem_seqno = 1;
++
+#ifdef __linux__
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+ intel_init_chipset_flush_compat(dev);
+#endif
+#endif
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /*
+ * Initialize the memory manager for local and AGP space
+ */
+ ret = drm_bo_driver_init(dev);
+ if (ret) {
+ DRM_ERROR("fail to init memory manager for "
+ "local & AGP space\n");
+ goto out_rmmap;
+ }
+
+ ret = i915_load_modeset_init(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to init modeset\n");
+ goto driver_fini;
+ }
+ }
+ return 0;
+
+driver_fini:
+ drm_bo_driver_finish(dev);
+out_rmmap:
+ drm_rmmap(dev, dev_priv->mmio_map);
+free_priv:
+ drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
+ return ret;
+}
+
+int i915_driver_unload(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(PRB0_CTL, 0);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_irq_uninstall(dev);
+ intel_modeset_cleanup(dev);
+ destroy_workqueue(dev_priv->wq);
+ }
+
+#if 0
+ if (dev_priv->ring.virtual_start) {
+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
+ }
+#endif
+ if (dev_priv->sarea_kmap.virtual) {
+ drm_bo_kunmap(&dev_priv->sarea_kmap);
+ dev_priv->sarea_kmap.virtual = NULL;
+ dev->sigdata.lock = NULL;
+ }
+
+ if (dev_priv->sarea_bo) {
+ mutex_lock(&dev->struct_mutex);
+ drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
+ mutex_unlock(&dev->struct_mutex);
+ dev_priv->sarea_bo = NULL;
+ }
+
+ if (dev_priv->status_page_dmah) {
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ dev_priv->status_page_dmah = NULL;
+ dev_priv->hw_status_page = NULL;
+ dev_priv->dma_status_page = 0;
+ /* Need to rewrite hardware status page */
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+ }
+
+ if (dev_priv->status_gfx_addr) {
+ dev_priv->status_gfx_addr = 0;
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ drm_bo_usage_deref_unlocked(&dev_priv->hws_bo);
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
+ dev_priv->ring.virtual_start);
+
+ DRM_DEBUG("usage is %d\n", atomic_read(&dev_priv->ring_buffer->usage));
+ mutex_lock(&dev->struct_mutex);
+ drm_bo_usage_deref_locked(&dev_priv->ring_buffer);
+
+ if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
+ DRM_ERROR("Memory manager type 3 not clean. "
+ "Delaying takedown\n");
+ }
+ if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) {
+ DRM_ERROR("Memory manager type 3 not clean. "
+ "Delaying takedown\n");
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ drm_bo_driver_finish(dev);
+
+#ifdef __linux__
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+ intel_init_chipset_flush_compat(dev);
+#endif
+#endif
+
+ DRM_DEBUG("%p\n", dev_priv->mmio_map);
+ drm_rmmap(dev, dev_priv->mmio_map);
+
+ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int i915_master_create(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_i915_master_private *master_priv;
+ unsigned long sareapage;
+ int ret;
+
+ master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
+ if (!master_priv)
+ return -ENOMEM;
+
+ /* prebuild the SAREA */
+ sareapage = max(SAREA_MAX, PAGE_SIZE);
+ ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER,
+ &master_priv->sarea);
+ if (ret) {
+ DRM_ERROR("SAREA setup failed\n");
+ return ret;
+ }
+ master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
+ master_priv->sarea_priv->pf_current_page = 0;
+
+ master->driver_priv = master_priv;
+ return 0;
+}
+
+void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_i915_master_private *master_priv = master->driver_priv;
+
+ if (!master_priv)
+ return;
+
+ if (master_priv->sarea)
+ drm_rmmap(dev, master_priv->sarea);
+
+ drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+
+ master->driver_priv = NULL;
+}
+
+void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
++ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+}
+
+void i915_driver_lastclose(struct drm_device * dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
++#ifdef I915_HAVE_BUFFER
++ if (dev_priv->val_bufs) {
++ vfree(dev_priv->val_bufs);
++ dev_priv->val_bufs = NULL;
++ }
++#endif
++
++ i915_gem_lastclose(dev);
++
+ if (dev_priv->agp_heap)
+ i915_mem_takedown(&(dev_priv->agp_heap));
++
++#if defined(I915_HAVE_BUFFER)
++ if (dev_priv->sarea_kmap.virtual) {
++ drm_bo_kunmap(&dev_priv->sarea_kmap);
++ dev_priv->sarea_kmap.virtual = NULL;
++ dev->control->master->lock.hw_lock = NULL;
++ dev->sigdata.lock = NULL;
++ }
++
++ if (dev_priv->sarea_bo) {
++ mutex_lock(&dev->struct_mutex);
++ drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
++ mutex_unlock(&dev->struct_mutex);
++ dev_priv->sarea_bo = NULL;
++ }
++#endif
+
+ i915_dma_cleanup(dev);
+}
+
+int i915_driver_firstopen(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
+ drm_bo_driver_init(dev);
+ return 0;
+}
return count;
}
- /*
- * This code is called in a more safe envirmoent to handle the hotplugs.
- * Add code here for hotplug love to userspace.
+static struct drm_device *hotplug_dev;
+
+ /**
+ * Handler for user interrupts in process context (able to sleep, do VFS
+ * operations, etc.).
+ *
+ * If another IRQ comes in while we're in this handler, it will still get put
+ * on the queue again to be rerun when we finish.
*/
-
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static void i915_hotplug_work_func(void *work)
+#else
+static void i915_hotplug_work_func(struct work_struct *work)
+#endif
+{
+ struct drm_device *dev = hotplug_dev;
+
+ drm_helper_hotplug_stage_two(dev);
+ drm_handle_hotplug(dev);
+}
+
+static int i915_run_hotplug_tasklet(struct drm_device *dev, uint32_t stat)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+ static DECLARE_WORK(hotplug, i915_hotplug_work_func, NULL);
+#else
+ static DECLARE_WORK(hotplug, i915_hotplug_work_func);
+#endif
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ hotplug_dev = dev;
+
+ if (stat & TV_HOTPLUG_INT_STATUS) {
+ DRM_DEBUG("TV event\n");
+ }
+
+ if (stat & CRT_HOTPLUG_INT_STATUS) {
+ DRM_DEBUG("CRT event\n");
+ }
+
+ if (stat & SDVOB_HOTPLUG_INT_STATUS) {
+ DRM_DEBUG("sDVOB event\n");
+ }
+
+ if (stat & SDVOC_HOTPLUG_INT_STATUS) {
+ DRM_DEBUG("sDVOC event\n");
+ }
- drm_i915_private_t *dev_priv;
+ queue_work(dev_priv->wq, &hotplug);
+
+ return 0;
+}
+
+ void
+ i915_user_interrupt_handler(struct work_struct *work)
+ {
- dev_priv = container_of(work, drm_i915_private_t,
++ struct drm_i915_private *dev_priv;
+ struct drm_device *dev;
+
++ dev_priv = container_of(work, struct drm_i915_private,
+ user_interrupt_task);
+ dev = dev_priv->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_retire_requests(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
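
A possible tidy-up (illustrative only, not part of this patch, and assuming the post-2.6.20 workqueue API): the hotplug_dev static above could be avoided by embedding the work item in drm_i915_private, mirroring how user_interrupt_task recovers its context; the hotplug_task member here is hypothetical:

static void i915_hotplug_work_func(struct work_struct *work)
{
	/* hotplug_task is a hypothetical struct work_struct member */
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_task);
	struct drm_device *dev = dev_priv->dev;

	drm_helper_hotplug_stage_two(dev);
	drm_handle_hotplug(dev);
}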
+
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
u32 iir;
- u32 pipea_stats, pipeb_stats;
+ u32 pipea_stats = 0, pipeb_stats, tvdac;
+ int hotplug = 0;
int vblank = 0;
- iir = I915_READ(I915REG_INT_IDENTITY_R);
+ /* On i8xx/i915 hardware the IIR and IER are 16-bit; on i9xx they are 32-bit */
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
+ iir = I915_READ(IIR);
+ else
+ iir = I915_READ16(IIR);
+
- iir &= (dev_priv->irq_enable_reg | I915_USER_INTERRUPT);
++ iir &= (~dev_priv->irq_mask_reg | I915_USER_INTERRUPT);
+
#if 0
DRM_DEBUG("flag=%08x\n", iir);
#endif
return dev_priv->counter;
}
-void i915_user_irq_on(drm_i915_private_t *dev_priv)
+void i915_user_irq_on(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+
DRM_SPINLOCK(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
- dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
+ dev_priv->irq_mask_reg &= ~I915_USER_INTERRUPT;
- I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
- (void) I915_READ (I915REG_INT_ENABLE_R);
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IER, dev_priv->irq_enable_reg);
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ else
- I915_WRITE16(IER, dev_priv->irq_enable_reg);
++ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
++ I915_READ16(IMR);
}
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
-
-void i915_user_irq_off(drm_i915_private_t *dev_priv)
+
+void i915_user_irq_off(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+
DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ BUG_ON(dev_priv->user_irq_refcount <= 0);
if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- // dev_priv->irq_enable_reg &= ~I915_USER_INTERRUPT;
- // if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- // I915_WRITE(IER, dev_priv->irq_enable_reg);
- // else
- // I915_WRITE16(IER, dev_priv->irq_enable_reg);
+ dev_priv->irq_mask_reg |= I915_USER_INTERRUPT;
- I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
- (void) I915_READ(I915REG_INT_MASK_R);
++ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ else
++ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
++ I915_READ16(IMR);
}
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
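
For reference, a sketch of how these refcounted helpers pair up around a breadcrumb wait, patterned on the body of i915_wait_irq below (illustrative; error handling elided):

	i915_user_irq_on(dev);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	i915_user_irq_off(dev);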
- static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+ int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+ struct drm_i915_master_private *master_priv;
int ret = 0;
DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
int i915_enable_vblank(struct drm_device *dev, int plane)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
+ u32 mask_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
- pipestat_reg = I915REG_PIPEASTAT;
+ pipestat_reg = PIPEASTAT;
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
- pipestat_reg = I915REG_PIPEBSTAT;
+ pipestat_reg = PIPEBSTAT;
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
/*
* Clear any pending status
*/
- pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
- I915_VBLANK_INTERRUPT_STATUS);
+ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+ PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
}
- I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
- I915_READ(I915REG_INT_MASK_R);
+
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ dev_priv->irq_mask_reg &= ~mask_reg;
- I915_WRITE(IER, dev_priv->irq_enable_reg);
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE16(IER, dev_priv->irq_enable_reg);
-
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ else
++ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
return 0;
}
void i915_disable_vblank(struct drm_device *dev, int plane)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
+ u32 mask_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
- pipestat_reg = I915REG_PIPEASTAT;
+ pipestat_reg = PIPEASTAT;
- dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
- pipestat_reg = I915REG_PIPEBSTAT;
+ pipestat_reg = PIPEBSTAT;
- dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
break;
}
- I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
- (void) I915_READ (I915REG_INT_MASK_R);
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ dev_priv->irq_mask_reg |= mask_reg;
- I915_WRITE(IER, dev_priv->irq_enable_reg);
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE16(IER, dev_priv->irq_enable_reg);
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ else
- if (pipestat_reg)
- {
++ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
- if (pipestat_reg)
- {
+
++ if (pipestat_reg) {
pipestat = I915_READ (pipestat_reg);
- pipestat &= ~(I915_START_VBLANK_INTERRUPT_ENABLE |
- I915_VBLANK_INTERRUPT_ENABLE);
+ pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+ PIPE_VBLANK_INTERRUPT_ENABLE);
/*
* Clear any pending status
*/
- pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
- I915_VBLANK_INTERRUPT_STATUS);
+ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+ PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
+ (void) I915_READ(pipestat_reg);
}
}
-static void i915_enable_interrupt (struct drm_device *dev)
+void i915_enable_interrupt (struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- dev_priv->irq_mask_reg = (I915_USER_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
- I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
- I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_mask_reg);
- (void) I915_READ (I915REG_INT_ENABLE_R);
- dev_priv->irq_enabled = 1;
-}
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+ struct drm_connector *o;
- dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
-static void i915_disable_interrupt (struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- I915_WRITE(I915REG_HWSTAM, 0xffffffff);
- I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
- I915_WRITE(I915REG_INT_ENABLE_R, 0);
- I915_WRITE(I915REG_INT_IDENTITY_R, 0xffffffff);
- (void) I915_READ (I915REG_INT_IDENTITY_R);
- dev_priv->irq_enabled = 0;
++ dev_priv->irq_mask_reg &= ~I915_USER_INTERRUPT;
+
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+ if (dev->mode_config.num_connector)
- dev_priv->irq_enable_reg |= I915_DISPLAY_PORT_INTERRUPT;
++ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
+ } else {
+ if (dev->mode_config.num_connector)
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
++ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+
+ /* Enable global interrupts for hotplug - not a pipeA event */
+ I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) |
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_STATUS |
+ PIPE_HOTPLUG_INTERRUPT_STATUS);
+ }
+
- if (dev_priv->irq_enable_reg & (I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)) {
++ if (!(dev_priv->irq_mask_reg & I915_DISPLAY_PORT_INTERRUPT) ||
++ !(dev_priv->irq_mask_reg & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)) {
+ u32 temp = 0;
+
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+ temp = I915_READ(PORT_HOTPLUG_EN);
+
+ /* Activate the CRT */
+ temp |= CRT_HOTPLUG_INT_EN;
+ }
+
+ if (IS_I9XX(dev)) {
+ /* SDVOB */
+ o = intel_sdvo_find(dev, 1);
+ if (o && intel_sdvo_supports_hotplug(o)) {
+ intel_sdvo_set_hotplug(o, 1);
+ temp |= SDVOB_HOTPLUG_INT_EN;
+ }
+
+ /* SDVOC */
+ o = intel_sdvo_find(dev, 0);
+ if (o && intel_sdvo_supports_hotplug(o)) {
+ intel_sdvo_set_hotplug(o, 1);
+ temp |= SDVOC_HOTPLUG_INT_EN;
+ }
+
+ I915_WRITE(SDVOB, I915_READ(SDVOB) | SDVO_INTERRUPT_ENABLE);
+ I915_WRITE(SDVOC, I915_READ(SDVOC) | SDVO_INTERRUPT_ENABLE);
+
+ /* TV */
+ I915_WRITE(TV_DAC, I915_READ(TV_DAC) | TVDAC_STATE_CHG_EN);
+ } else {
+ /* TODO: DVO hotplug is not wired up yet */
+ }
+
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, temp);
+
+ DRM_DEBUG("HEN %08x\n",I915_READ(PORT_HOTPLUG_EN));
+ DRM_DEBUG("HST %08x\n",I915_READ(PORT_HOTPLUG_STAT));
+
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+ }
+
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IER, dev_priv->irq_enable_reg);
- else
- I915_WRITE16(IER, dev_priv->irq_enable_reg);
++ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ I915_WRITE(IER, ~dev_priv->irq_mask_reg);
++ } else {
++ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
++ I915_WRITE16(IER, ~(u16)dev_priv->irq_mask_reg);
++ }
+
+ dev_priv->irq_enabled = 1;
}
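
To restate the convention this function now relies on: a bit set in IMR blocks that source, IER is programmed as the complement of IMR, and enabling an interrupt is therefore a clear of its mask bit, e.g. (sketch only):

	dev_priv->irq_mask_reg &= ~I915_USER_INTERRUPT;	/* unmask = enable */
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IMR);				/* flush the posted write */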
/* Set the vblank monitor pipe
*/
void i915_driver_irq_preinstall(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+ u32 tmp;
+
+ tmp = I915_READ(PIPEASTAT);
+ I915_WRITE(PIPEASTAT, tmp);
+ tmp = I915_READ(PIPEBSTAT);
+ I915_WRITE(PIPEBSTAT, tmp);
+
- I915_WRITE(I915REG_HWSTAM, 0xffff);
- I915_WRITE(I915REG_INT_ENABLE_R, 0x0);
- I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
- I915_WRITE(I915REG_INT_IDENTITY_R, 0xffffffff);
- (void) I915_READ(I915REG_INT_IDENTITY_R);
+ I915_WRITE16(HWSTAM, 0xeffe);
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+ I915_WRITE(IMR, 0x0);
+ I915_WRITE(IER, 0x0);
+ tmp = I915_READ(IIR);
+ I915_WRITE(IIR, tmp);
+ } else {
+ I915_WRITE16(IMR, 0x0);
+ I915_WRITE16(IER, 0x0);
+ tmp = I915_READ16(IIR);
+ I915_WRITE16(IIR, tmp);
+ }
-
}
int i915_driver_irq_postinstall(struct drm_device * dev)
DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
dev_priv->user_irq_refcount = 0;
- dev_priv->irq_enable_reg = 0;
- dev_priv->irq_mask_reg = 0;
++ dev_priv->irq_mask_reg = ~0;
ret = drm_vblank_init(dev, num_pipes);
if (ret)
if (!dev_priv)
return;
- dev_priv->irq_enabled = 0;
- i915_disable_interrupt (dev);
++ dev_priv->irq_enabled = 0;
+
+ temp = I915_READ(PIPEASTAT);
+ I915_WRITE(PIPEASTAT, temp);
+ temp = I915_READ(PIPEBSTAT);
+ I915_WRITE(PIPEBSTAT, temp);
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+ I915_WRITE(HWSTAM, 0xffffffff);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
+ temp = I915_READ(IIR);
+ I915_WRITE(IIR, temp);
+ } else {
+ I915_WRITE16(HWSTAM, 0xffff);
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
- temp = I915_READ(I915REG_PIPEASTAT);
- I915_WRITE(I915REG_PIPEASTAT, temp);
- temp = I915_READ(I915REG_PIPEBSTAT);
- I915_WRITE(I915REG_PIPEBSTAT, temp);
+ temp = I915_READ16(IIR);
+ I915_WRITE16(IIR, temp);
+ }
}
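
For reference, the DRM core drives these three hooks in a fixed order; shown schematically below (argument lists abbreviated; this is core behaviour, not part of this diff):

	/* drm_irq_install(): */
	dev->driver->irq_preinstall(dev);	/* mask everything, ack stale IIR bits */
	request_irq(dev->pdev->irq, dev->driver->irq_handler, ...);
	dev->driver->irq_postinstall(dev);	/* set up refcounts and vblank state */

	/* drm_irq_uninstall(): */
	dev->driver->irq_uninstall(dev);	/* quiesce the hardware */
	free_irq(dev->pdev->irq, dev);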