* tag 'drm-intel-next-2012-02-07' of git://people.freedesktop.org/~danvet/drm-intel: (29 commits)
drm/i915: Handle unmappable buffers during error state capture
drm/i915: rewrite shmem_pread_slow to use copy_to_user
drm/i915: rewrite shmem_pwrite_slow to use copy_from_user
drm/i915: fall through pwrite_gtt_slow to the shmem slow path
drm/i915: add debugfs file for swizzling information
drm/i915: fix swizzle detection for gen3
drm/i915: Remove the upper limit on the bo size for mapping into the CPU domain
drm/i915: add per-ring fault reg to error_state
drm/i915: reject GTT domain in relocations
drm/i915: remove the i915_batchbuffer_info debugfs file
drm/i915: capture error_state also for stuck rings
drm/i915: refactor debugfs create functions
drm/i915: refactor debugfs open function
drm/i915: don't trash the gtt when running out of fences
drm/i915: Separate fence pin counting from normal bind pin counting
drm/i915/ringbuffer: kill snb blt workaround
drm/i915: collect more per ring error state
drm/i915: refactor ring error state capture to use arrays
drm/i915: switch ring->id to be a real id
drm/i915: set AUD_CONFIG N_value_index for DisplayPort
...
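
Two of the commits above ("add debugfs file for swizzling information" and "fix swizzle detection for gen3") expose the detected bit-6 swizzle mode through the new i915_swizzle_info debugfs file in the diff below. As a rough, stand-alone illustration of what a mode such as bit9/bit10 means for software touching a tiled buffer through the CPU, here is a small sketch; the helper name, the demo offsets and the exact XOR rule are the editor's illustration of the commonly documented behaviour, not part of the pull itself:

#include <stdint.h>
#include <stdio.h>

/* Editor's illustration: under I915_BIT_6_SWIZZLE_9_10, bit 6 of a linear
 * byte offset is XORed with bits 9 and 10 before the access hits memory,
 * so CPU code reading a tiled buffer must apply the same swizzle. */
static uint32_t swizzle_bit6_9_10(uint32_t offset)
{
	uint32_t bit9  = (offset >> 9)  & 1;
	uint32_t bit10 = (offset >> 10) & 1;

	/* Flip bit 6 whenever bit9 ^ bit10 is set. */
	return offset ^ ((bit9 ^ bit10) << 6);
}

int main(void)
{
	uint32_t offsets[] = { 0x0000, 0x0200, 0x0400, 0x0600 };
	unsigned i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
		printf("0x%04x -> 0x%04x\n", offsets[i], swizzle_bit6_9_10(offsets[i]));
	return 0;
}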
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
+ seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
- obj->base.size,
+ obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
obj->last_rendering_seqno,
return 0;
}
- static void i915_dump_object(struct seq_file *m,
- struct io_mapping *mapping,
- struct drm_i915_gem_object *obj)
- {
- int page, page_count, i;
-
- page_count = obj->base.size / PAGE_SIZE;
- for (page = 0; page < page_count; page++) {
- u32 *mem = io_mapping_map_wc(mapping,
- obj->gtt_offset + page * PAGE_SIZE);
- for (i = 0; i < PAGE_SIZE; i += 4)
- seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
- io_mapping_unmap(mem);
- }
- }
-
- static int i915_batchbuffer_info(struct seq_file *m, void *data)
- {
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
- seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
- i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
- }
- }
-
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
seq_printf(m, " Size : %08x\n", ring->size);
seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
}
static const char *ring_str(int ring)
{
switch (ring) {
- case RING_RENDER: return " render";
- case RING_BSD: return " bsd";
- case RING_BLT: return " blt";
+ case RCS: return "render";
+ case VCS: return "bsd";
+ case BCS: return "blt";
default: return "";
}
}
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s",
+ seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
tiling_flag(err->tiling),
dirty_flag(err->dirty),
purgeable_flag(err->purgeable),
+ err->ring != -1 ? " " : "",
ring_str(err->ring),
cache_level_str(err->cache_level));
}
}
+ static void i915_ring_error_state(struct seq_file *m,
+ struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ unsigned ring)
+ {
+ seq_printf(m, "%s command stream:\n", ring_str(ring));
+ seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
+ seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+ seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
+ if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
+ seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
+ }
+ if (INTEL_INFO(dev)->gen >= 4)
+ seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+ seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
+ seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+ }
+ seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+ }
+
static int i915_error_state(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, "ERROR: 0x%08x\n", error->error);
- seq_printf(m, "Blitter command stream:\n");
- seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
- seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
- seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
- seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
- seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
- seq_printf(m, "Video (BSD) command stream:\n");
- seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
- seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
- seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
- seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
- seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
+ seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- seq_printf(m, "Render command stream:\n");
- seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
- seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
- seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
- seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
- if (INTEL_INFO(dev)->gen >= 4) {
- seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
- seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
- }
- seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
- seq_printf(m, " seqno: 0x%08x\n", error->seqno);
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+ i915_ring_error_state(m, dev, error, RCS);
+ if (HAS_BLT(dev))
+ i915_ring_error_state(m, dev, error, BCS);
+ if (HAS_BSD(dev))
+ i915_ring_error_state(m, dev, error, VCS);
if (error->active_bo)
print_error_buffers(m, "Active",
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, gt_core_status, rcctl1;
+ unsigned forcewake_count;
int count=0, ret;
if (ret)
return ret;
- if (atomic_read(&dev_priv->forcewake_count)) {
- seq_printf(m, "RC information inaccurate because userspace "
- "holds a reference \n");
+ spin_lock_irq(&dev_priv->gt_lock);
+ forcewake_count = dev_priv->forcewake_count;
+ spin_unlock_irq(&dev_priv->gt_lock);
+
+ if (forcewake_count) {
+ seq_printf(m, "RC information inaccurate because somebody "
+ "holds a forcewake reference \n");
} else {
/* NB: we cannot use forcewake, else we read the wrong values */
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
seq_printf(m, "SW control enabled: %s\n",
yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
- seq_printf(m, "RC6 Enabled: %s\n",
+ seq_printf(m, "RC1e Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
seq_printf(m, "RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned forcewake_count;
+
+ spin_lock_irq(&dev_priv->gt_lock);
+ forcewake_count = dev_priv->forcewake_count;
+ spin_unlock_irq(&dev_priv->gt_lock);
- seq_printf(m, "forcewake count = %d\n",
- atomic_read(&dev_priv->forcewake_count));
+ seq_printf(m, "forcewake count = %u\n", forcewake_count);
return 0;
}
+ static const char *swizzle_string(unsigned swizzle)
+ {
+ switch(swizzle) {
+ case I915_BIT_6_SWIZZLE_NONE:
+ return "none";
+ case I915_BIT_6_SWIZZLE_9:
+ return "bit9";
+ case I915_BIT_6_SWIZZLE_9_10:
+ return "bit9/bit10";
+ case I915_BIT_6_SWIZZLE_9_11:
+ return "bit9/bit11";
+ case I915_BIT_6_SWIZZLE_9_10_11:
+ return "bit9/bit10/bit11";
+ case I915_BIT_6_SWIZZLE_9_17:
+ return "bit9/bit17";
+ case I915_BIT_6_SWIZZLE_9_10_17:
+ return "bit9/bit10/bit17";
+ case I915_BIT_6_SWIZZLE_UNKNOWN:
+ return "unkown";
+ }
+
+ return "bug";
+ }
+
+ static int i915_swizzle_info(struct seq_file *m, void *data)
+ {
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
+ swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+ seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
+ swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+
+ if (IS_GEN3(dev) || IS_GEN4(dev)) {
+ seq_printf(m, "DDC = 0x%08x\n",
+ I915_READ(DCC));
+ seq_printf(m, "C0DRB3 = 0x%04x\n",
+ I915_READ16(C0DRB3));
+ seq_printf(m, "C1DRB3 = 0x%04x\n",
+ I915_READ16(C1DRB3));
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
+
static int
- i915_wedged_open(struct inode *inode,
- struct file *filp)
+ i915_debugfs_common_open(struct inode *inode,
+ struct file *filp)
{
filp->private_data = inode->i_private;
return 0;
static const struct file_operations i915_wedged_fops = {
.owner = THIS_MODULE,
- .open = i915_wedged_open,
+ .open = i915_debugfs_common_open,
.read = i915_wedged_read,
.write = i915_wedged_write,
.llseek = default_llseek,
};
- static int
- i915_max_freq_open(struct inode *inode,
- struct file *filp)
- {
- filp->private_data = inode->i_private;
- return 0;
- }
-
static ssize_t
i915_max_freq_read(struct file *filp,
char __user *ubuf,
static const struct file_operations i915_max_freq_fops = {
.owner = THIS_MODULE,
- .open = i915_max_freq_open,
+ .open = i915_debugfs_common_open,
.read = i915_max_freq_read,
.write = i915_max_freq_write,
.llseek = default_llseek,
};
- static int
- i915_cache_sharing_open(struct inode *inode,
- struct file *filp)
- {
- filp->private_data = inode->i_private;
- return 0;
- }
-
static ssize_t
i915_cache_sharing_read(struct file *filp,
char __user *ubuf,
static const struct file_operations i915_cache_sharing_fops = {
.owner = THIS_MODULE,
- .open = i915_cache_sharing_open,
+ .open = i915_debugfs_common_open,
.read = i915_cache_sharing_read,
.write = i915_cache_sharing_write,
.llseek = default_llseek,
return 0;
}
- static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
- {
- struct drm_device *dev = minor->dev;
- struct dentry *ent;
-
- ent = debugfs_create_file("i915_wedged",
- S_IRUGO | S_IWUSR,
- root, dev,
- &i915_wedged_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
-
- return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
- }
-
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (!IS_GEN6(dev))
+ if (INTEL_INFO(dev)->gen < 6)
return 0;
ret = mutex_lock_interruptible(&dev->struct_mutex);
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_GEN6(dev))
+ if (INTEL_INFO(dev)->gen < 6)
return 0;
/*
return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
- static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
- {
- struct drm_device *dev = minor->dev;
- struct dentry *ent;
-
- ent = debugfs_create_file("i915_max_freq",
- S_IRUGO | S_IWUSR,
- root, dev,
- &i915_max_freq_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
-
- return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
- }
-
- static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
+ static int i915_debugfs_create(struct dentry *root,
+ struct drm_minor *minor,
+ const char *name,
+ const struct file_operations *fops)
{
struct drm_device *dev = minor->dev;
struct dentry *ent;
- ent = debugfs_create_file("i915_cache_sharing",
+ ent = debugfs_create_file(name,
S_IRUGO | S_IWUSR,
root, dev,
- &i915_cache_sharing_fops);
+ fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
- return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
+ return drm_add_fake_info_node(minor, ent, fops);
}
static struct drm_info_list i915_debugfs_list[] = {
{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
- {"i915_batchbuffers", i915_batchbuffer_info, 0},
{"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
+ {"i915_swizzle_info", i915_swizzle_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
{
int ret;
- ret = i915_wedged_create(minor->debugfs_root, minor);
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_wedged",
+ &i915_wedged_fops);
if (ret)
return ret;
ret = i915_forcewake_create(minor->debugfs_root, minor);
if (ret)
return ret;
- ret = i915_max_freq_create(minor->debugfs_root, minor);
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_max_freq",
+ &i915_max_freq_fops);
if (ret)
return ret;
- ret = i915_cache_sharing_create(minor->debugfs_root, minor);
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_cache_sharing",
+ &i915_cache_sharing_fops);
if (ret)
return ret;
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
+ spin_lock_init(&dev_priv->gt_lock);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev);
+ ret = i915_gpu_idle(dev, true);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
mutex_unlock(&dev->struct_mutex);
struct list_head lru_list;
struct drm_i915_gem_object *obj;
uint32_t setup_seqno;
+ int pin_count;
};
struct sdvo_device_mapping {
u32 eir;
u32 pgtbl_er;
u32 pipestat[I915_MAX_PIPES];
- u32 ipeir;
- u32 ipehr;
- u32 instdone;
- u32 acthd;
+ u32 tail[I915_NUM_RINGS];
+ u32 head[I915_NUM_RINGS];
+ u32 ipeir[I915_NUM_RINGS];
+ u32 ipehr[I915_NUM_RINGS];
+ u32 instdone[I915_NUM_RINGS];
+ u32 acthd[I915_NUM_RINGS];
u32 error; /* gen6+ */
- u32 bcs_acthd; /* gen6+ blt engine */
- u32 bcs_ipehr;
- u32 bcs_ipeir;
- u32 bcs_instdone;
- u32 bcs_seqno;
- u32 vcs_acthd; /* gen6+ bsd engine */
- u32 vcs_ipehr;
- u32 vcs_ipeir;
- u32 vcs_instdone;
- u32 vcs_seqno;
- u32 instpm;
- u32 instps;
+ u32 instpm[I915_NUM_RINGS];
+ u32 instps[I915_NUM_RINGS];
u32 instdone1;
- u32 seqno;
+ u32 seqno[I915_NUM_RINGS];
u64 bbaddr;
+ u32 fault_reg[I915_NUM_RINGS];
+ u32 done_reg;
+ u32 faddr[I915_NUM_RINGS];
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct drm_i915_error_object {
int relative_constants_mode;
void __iomem *regs;
- u32 gt_fifo_count;
+ /** gt_fifo_count and the subsequent register write are synchronized
+ * with dev->struct_mutex. */
+ unsigned gt_fifo_count;
+ /** forcewake_count is protected by gt_lock */
+ unsigned forcewake_count;
+ /** gt_lock is also taken in irq contexts. */
+ struct spinlock gt_lock;
struct intel_gmbus {
struct i2c_adapter adapter;
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
-
- atomic_t forcewake_count;
} drm_i915_private_t;
enum i915_cache_level {
struct intel_ring_buffer *pipelined);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+ static inline void
+ i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+ {
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ }
+ }
+
+ static inline void
+ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+ {
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ dev_priv->fence_regs[obj->fence_reg].pin_count--;
+ }
+ }
+
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
unsigned long start,
unsigned long mappable_end,
unsigned long end);
- int __must_check i915_gpu_idle(struct drm_device *dev);
+ int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno);
+ uint32_t seqno,
+ bool do_retire);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
cd->invalidate_domains |= invalidate_domains;
cd->flush_domains |= flush_domains;
if (flush_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= obj->ring->id;
+ cd->flush_rings |= intel_ring_flag(obj->ring);
if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= ring->id;
+ cd->flush_rings |= intel_ring_flag(ring);
}
struct eb_objects {
reloc->write_domain);
return ret;
}
- if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
- DRM_ERROR("reloc with read/write CPU domains: "
+ if (unlikely((reloc->write_domain | reloc->read_domains)
+ & ~I915_GEM_GPU_DOMAINS)) {
+ DRM_ERROR("reloc with read/write non-GPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
return ret;
}
+ #define __EXEC_OBJECT_HAS_FENCE (1<<31)
+
+ static int
+ pin_and_fence_object(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring)
+ {
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ bool need_fence, need_mappable;
+ int ret;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+ if (ret)
+ return ret;
+
+ if (has_fenced_gpu_access) {
+ if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+ if (obj->tiling_mode) {
+ ret = i915_gem_object_get_fence(obj, ring);
+ if (ret)
+ goto err_unpin;
+
+ entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+ i915_gem_object_pin_fence(obj);
+ } else {
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto err_unpin;
+ }
+ }
+ obj->pending_fenced_gpu_access = need_fence;
+ }
+
+ entry->offset = obj->gtt_offset;
+ return 0;
+
+ err_unpin:
+ i915_gem_object_unpin(obj);
+ return ret;
+ }
+
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
+
if (!obj->gtt_space)
continue;
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
- ret = i915_gem_object_pin(obj,
- entry->alignment,
- need_mappable);
+ ret = pin_and_fence_object(obj, ring);
if (ret)
goto err;
-
- entry++;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
- bool need_fence;
-
- need_fence =
- has_fenced_gpu_access &&
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
-
- if (!obj->gtt_space) {
- bool need_mappable =
- entry->relocation_count ? true : need_fence;
-
- ret = i915_gem_object_pin(obj,
- entry->alignment,
- need_mappable);
- if (ret)
- break;
- }
+ if (obj->gtt_space)
+ continue;
- if (has_fenced_gpu_access) {
- if (need_fence) {
- ret = i915_gem_object_get_fence(obj, ring);
- if (ret)
- break;
- } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode == I915_TILING_NONE) {
- /* XXX pipelined! */
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- break;
- }
- obj->pending_fenced_gpu_access = need_fence;
+ ret = pin_and_fence_object(obj, ring);
+ if (ret) {
+ int ret_ignore;
+
+ /* This can potentially raise a harmless
+ * -EINVAL if we failed to bind in the above
+ * call. It cannot raise -EINTR since we know
+ * that the bo is freshly bound and so will
+ * not need to be flushed or waited upon.
+ */
+ ret_ignore = i915_gem_object_unbind(obj);
+ (void)ret_ignore;
+ WARN_ON(obj->gtt_space);
+ break;
}
-
- entry->offset = obj->gtt_offset;
}
/* Decrement pin count for bound objects */
list_for_each_entry(obj, objects, exec_list) {
- if (obj->gtt_space)
- i915_gem_object_unpin(obj);
+ struct drm_i915_gem_exec_object2 *entry;
+
+ if (!obj->gtt_space)
+ continue;
+
+ entry = obj->exec_entry;
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+ i915_gem_object_unpin_fence(obj);
+ entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+ }
+
+ i915_gem_object_unpin(obj);
}
if (ret != -ENOSPC || retry > 1)
} while (1);
err:
- obj = list_entry(obj->exec_list.prev,
- struct drm_i915_gem_object,
- exec_list);
- while (objects != &obj->exec_list) {
- if (obj->gtt_space)
- i915_gem_object_unpin(obj);
+ list_for_each_entry_continue_reverse(obj, objects, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry;
+
+ if (!obj->gtt_space)
+ continue;
+
+ entry = obj->exec_entry;
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+ i915_gem_object_unpin_fence(obj);
+ entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+ }
- obj = list_entry(obj->exec_list.prev,
- struct drm_i915_gem_object,
- exec_list);
+ i915_gem_object_unpin(obj);
}
return ret;
if (i915_semaphores >= 0)
return i915_semaphores;
- /* Enable semaphores on SNB when IO remapping is off */
+ /* Disable semaphores on SNB */
if (INTEL_INFO(dev)->gen == 6)
- return !intel_iommu_enabled;
+ return 0;
return 1;
}
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
- ret = i915_gpu_idle(dev);
+ ret = i915_gpu_idle(dev, true);
if (ret)
goto err;
reloc_offset = src->gtt_offset;
for (page = 0; page < page_count; page++) {
unsigned long flags;
- void __iomem *s;
void *d;
d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
goto unwind;
local_irq_save(flags);
- s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc_offset);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s);
+ if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+ void __iomem *s;
+
+ /* Simply ignore tiling or any overlapping fence.
+ * It's part of the error state, and this hopefully
+ * captures what the GPU read.
+ */
+
+ s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc_offset);
+ memcpy_fromio(d, s, PAGE_SIZE);
+ io_mapping_unmap_atomic(s);
+ } else {
+ void *s;
+
+ drm_clflush_pages(&src->pages[page], 1);
+
+ s = kmap_atomic(src->pages[page]);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s);
+
+ drm_clflush_pages(&src->pages[page], 1);
+ }
local_irq_restore(flags);
dst->pages[page] = d;
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
- err->ring = obj->ring ? obj->ring->id : 0;
+ err->ring = obj->ring ? obj->ring->id : -1;
err->cache_level = obj->cache_level;
if (++i == count)
return NULL;
}
+ static void i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ struct intel_ring_buffer *ring)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+ error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+ error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+ error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+ error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+ if (ring->id == RCS) {
+ error->instdone1 = I915_READ(INSTDONE1);
+ error->bbaddr = I915_READ64(BB_ADDR);
+ }
+ } else {
+ error->ipeir[ring->id] = I915_READ(IPEIR);
+ error->ipehr[ring->id] = I915_READ(IPEHR);
+ error->instdone[ring->id] = I915_READ(INSTDONE);
+ }
+
+ error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+ error->seqno[ring->id] = ring->get_seqno(ring);
+ error->acthd[ring->id] = intel_ring_get_active_head(ring);
+ error->head[ring->id] = I915_READ_HEAD(ring);
+ error->tail[ring->id] = I915_READ_TAIL(ring);
+ }
+
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
return;
/* Account for pipe specific data like PIPE*STAT */
- error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
dev->primary->index);
- error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
- error->instpm = I915_READ(INSTPM);
- error->error = 0;
+
if (INTEL_INFO(dev)->gen >= 6) {
error->error = I915_READ(ERROR_GEN6);
-
- error->bcs_acthd = I915_READ(BCS_ACTHD);
- error->bcs_ipehr = I915_READ(BCS_IPEHR);
- error->bcs_ipeir = I915_READ(BCS_IPEIR);
- error->bcs_instdone = I915_READ(BCS_INSTDONE);
- error->bcs_seqno = 0;
- if (dev_priv->ring[BCS].get_seqno)
- error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
-
- error->vcs_acthd = I915_READ(VCS_ACTHD);
- error->vcs_ipehr = I915_READ(VCS_IPEHR);
- error->vcs_ipeir = I915_READ(VCS_IPEIR);
- error->vcs_instdone = I915_READ(VCS_INSTDONE);
- error->vcs_seqno = 0;
- if (dev_priv->ring[VCS].get_seqno)
- error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
- }
- if (INTEL_INFO(dev)->gen >= 4) {
- error->ipeir = I915_READ(IPEIR_I965);
- error->ipehr = I915_READ(IPEHR_I965);
- error->instdone = I915_READ(INSTDONE_I965);
- error->instps = I915_READ(INSTPS);
- error->instdone1 = I915_READ(INSTDONE1);
- error->acthd = I915_READ(ACTHD_I965);
- error->bbaddr = I915_READ64(BB_ADDR);
- } else {
- error->ipeir = I915_READ(IPEIR);
- error->ipehr = I915_READ(IPEHR);
- error->instdone = I915_READ(INSTDONE);
- error->acthd = I915_READ(ACTHD);
- error->bbaddr = 0;
+ error->done_reg = I915_READ(DONE_REG);
}
+
+ i915_record_ring_state(dev, error, &dev_priv->ring[RCS]);
+ if (HAS_BLT(dev))
+ i915_record_ring_state(dev, error, &dev_priv->ring[BCS]);
+ if (HAS_BSD(dev))
+ i915_record_ring_state(dev, error, &dev_priv->ring[VCS]);
+
i915_gem_record_fences(dev, error);
/* Record the active batch and ring buffers */
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
+ unsigned long flags;
- spin_lock(&dev_priv->error_lock);
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
dev_priv->first_error = NULL;
- spin_unlock(&dev_priv->error_lock);
+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
if (error)
i915_error_state_free(dev, error);
dev_priv->last_instdone1 == instdone1) {
if (dev_priv->hangcheck_count++ > 1) {
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
if (!IS_GEN2(dev)) {
/* Is the chip hanging on a WAIT_FOR_EVENT?
* and break the hang. This should work on
* all but the second generation chipsets.
*/
-
if (kick_ring(&dev_priv->ring[RCS]))
goto repeat;
goto repeat;
}
- i915_handle_error(dev, true);
return;
}
} else {
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+
+ if (IS_GEN6(dev)) {
/* Workaround stalls observed on Sandy Bridge GPUs by
* making the blitter command streamer generate a
* write to the Hardware Status Page for
u32 val;
bool cur_state;
+ /* if we need the pipe A quirk, it must always be on */
+ if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+ state = true;
+
reg = PIPECONF(pipe);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
ret = i915_gem_object_get_fence(obj, pipelined);
if (ret)
goto err_unpin;
+
+ i915_gem_object_pin_fence(obj);
}
dev_priv->mm.interruptible = true;
return ret;
}
+ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+ {
+ i915_gem_object_unpin_fence(obj);
+ i915_gem_object_unpin(obj);
+ }
+
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y)
{
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
LEAVE_ATOMIC_MODE_SET);
if (ret) {
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("failed to update base address\n");
return ret;
if (old_fb) {
intel_wait_for_vblank(dev, intel_crtc->pipe);
- i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
}
mutex_unlock(&dev->struct_mutex);
if (crtc->fb) {
mutex_lock(&dev->struct_mutex);
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
}
}
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEA_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEB_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEC_IVB,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ val = I915_READ(WM0_PIPEC_IVB);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEC_IVB, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
int sprite_wm, reg;
int ret;
return;
}
- I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+ val = I915_READ(reg);
+ val &= ~WM0_PIPE_SPRITE_MASK;
+ I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
if (is_lvds) {
temp = I915_READ(PCH_LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
temp |= PORT_TRANS_SEL_CPT(pipe);
- else if (pipe == 1)
- temp |= LVDS_PIPEB_SELECT;
- else
- temp &= ~LVDS_PIPEB_SELECT;
+ } else {
+ if (pipe == 1)
+ temp |= LVDS_PIPEB_SELECT;
+ else
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
uint32_t i;
int len;
int hdmiw_hdmiedid;
+ int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
if (HAS_PCH_IBX(connector->dev)) {
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
+ aud_config = IBX_AUD_CONFIG_A;
aud_cntl_st = IBX_AUD_CNTL_ST_A;
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else {
hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
+ aud_config = CPT_AUD_CONFIG_A;
aud_cntl_st = CPT_AUD_CNTL_ST_A;
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
i = to_intel_crtc(crtc)->pipe;
hdmiw_hdmiedid += i * 0x100;
aud_cntl_st += i * 0x100;
+ aud_config += i * 0x100;
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- }
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else
+ I915_WRITE(aud_config, 0);
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
container_of(__work, struct intel_unpin_work, work);
mutex_lock(&work->dev->struct_mutex);
- i915_gem_object_unpin(work->old_fb_obj);
+ intel_unpin_fb_obj(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
+ OUT_RING(0); /* aux display base address, unused */
ADVANCE_LP_RING();
out:
return ret;
case DRM_FORMAT_VYUY:
break;
default:
- DRM_ERROR("unsupported pixel format\n");
+ DRM_DEBUG_KMS("unsupported pixel format %u\n",
+ mode_cmd->pixel_format);
return -EINVAL;
}
return 0;
/*
- * Enable rc6 on Sandybridge if DMA remapping is disabled
+ * Disable rc6 on Sandybridge
*/
if (INTEL_INFO(dev)->gen == 6) {
- DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
- intel_iommu_enabled ? "true" : "false",
- !intel_iommu_enabled ? "en" : "dis");
- return !intel_iommu_enabled;
+ DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
+ return 0;
}
DRM_DEBUG_DRIVER("RC6 enabled\n");
return 1;
for (i = 0; i < dev_priv->num_pipe; i++) {
intel_crtc_init(dev, i);
- if (HAS_PCH_SPLIT(dev)) {
- ret = intel_plane_init(dev, i);
- if (ret)
- DRM_ERROR("plane %d init failed: %d\n",
- i, ret);
- }
+ ret = intel_plane_init(dev, i);
+ if (ret)
+ DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
/* Just disable it once at startup */
if (INTEL_INFO(dev)->gen > 3) {
int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
- if (IS_GEN6(dev) || IS_GEN7(dev))
- mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
if (IS_GEN7(dev))
I915_WRITE(GFX_MODE_GEN7,
return 0;
}
+static u32
+gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ /* Workaround to force correct ordering between irq and seqno writes on
+ * ivb (and maybe also on snb) by reading from a CS register (like
+ * ACTHD) before reading the status page. */
+ if (IS_GEN7(dev))
+ intel_ring_get_active_head(ring);
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
*/
if (IS_GEN7(dev)) {
switch (ring->id) {
- case RING_RENDER:
+ case RCS:
mmio = RENDER_HWS_PGA_GEN7;
break;
- case RING_BLT:
+ case BCS:
mmio = BLT_HWS_PGA_GEN7;
break;
- case RING_BSD:
+ case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
}
return 0;
}
-static bool
-gen7_blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
- /* The BLT ring on IVB appears to have broken synchronization
- * between the seqno write and the interrupt, so that the
- * interrupt appears first. Returning false here makes
- * i915_wait_request() do a polling loop, instead.
- */
- return false;
-}
-
static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
if (!dev->irq_enabled)
return false;
+ /* It looks like we need to prevent the gt from suspending while waiting
+ * for a notify irq, otherwise irqs seem to get lost on at least the
+ * blt/bsd rings on ivb. */
+ if (IS_GEN7(dev))
+ gen6_gt_force_wake_get(dev_priv);
+
spin_lock(&ring->irq_lock);
if (ring->irq_refcount++ == 0) {
ring->irq_mask &= ~rflag;
ironlake_disable_irq(dev_priv, gflag);
}
spin_unlock(&ring->irq_lock);
+
+ if (IS_GEN7(dev))
+ gen6_gt_force_wake_put(dev_priv);
}
static bool
static const struct intel_ring_buffer render_ring = {
.name = "render ring",
- .id = RING_RENDER,
+ .id = RCS,
.mmio_base = RENDER_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_render_ring,
static const struct intel_ring_buffer bsd_ring = {
.name = "bsd ring",
- .id = RING_BSD,
+ .id = VCS,
.mmio_base = BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_ring_common,
/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
.name = "gen6 bsd ring",
- .id = RING_BSD,
+ .id = VCS,
.mmio_base = GEN6_BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_ring_common,
.write_tail = gen6_bsd_ring_write_tail,
.flush = gen6_ring_flush,
.add_request = gen6_add_request,
- .get_seqno = ring_get_seqno,
+ .get_seqno = gen6_ring_get_seqno,
.irq_get = gen6_bsd_ring_get_irq,
.irq_put = gen6_bsd_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
GEN6_BLITTER_USER_INTERRUPT);
}
-
- /* Workaround for some stepping of SNB,
- * each time when BLT engine ring tail moved,
- * the first command in the ring to be parsed
- * should be MI_BATCH_BUFFER_START
- */
- #define NEED_BLT_WORKAROUND(dev) \
- (IS_GEN6(dev) && (dev->pdev->revision < 8))
-
- static inline struct drm_i915_gem_object *
- to_blt_workaround(struct intel_ring_buffer *ring)
- {
- return ring->private;
- }
-
- static int blt_ring_init(struct intel_ring_buffer *ring)
- {
- if (NEED_BLT_WORKAROUND(ring->dev)) {
- struct drm_i915_gem_object *obj;
- u32 *ptr;
- int ret;
-
- obj = i915_gem_alloc_object(ring->dev, 4096);
- if (obj == NULL)
- return -ENOMEM;
-
- ret = i915_gem_object_pin(obj, 4096, true);
- if (ret) {
- drm_gem_object_unreference(&obj->base);
- return ret;
- }
-
- ptr = kmap(obj->pages[0]);
- *ptr++ = MI_BATCH_BUFFER_END;
- *ptr++ = MI_NOOP;
- kunmap(obj->pages[0]);
-
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- if (ret) {
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(&obj->base);
- return ret;
- }
-
- ring->private = obj;
- }
-
- return init_ring_common(ring);
- }
-
- static int blt_ring_begin(struct intel_ring_buffer *ring,
- int num_dwords)
- {
- if (ring->private) {
- int ret = intel_ring_begin(ring, num_dwords+2);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_BATCH_BUFFER_START);
- intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
-
- return 0;
- } else
- return intel_ring_begin(ring, 4);
- }
-
static int blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
uint32_t cmd;
int ret;
- ret = blt_ring_begin(ring, 4);
+ ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
return 0;
}
- static void blt_ring_cleanup(struct intel_ring_buffer *ring)
- {
- if (!ring->private)
- return;
-
- i915_gem_object_unpin(ring->private);
- drm_gem_object_unreference(ring->private);
- ring->private = NULL;
- }
-
static const struct intel_ring_buffer gen6_blt_ring = {
.name = "blt ring",
- .id = RING_BLT,
+ .id = BCS,
.mmio_base = BLT_RING_BASE,
.size = 32 * PAGE_SIZE,
- .init = blt_ring_init,
+ .init = init_ring_common,
.write_tail = ring_write_tail,
.flush = blt_ring_flush,
.add_request = gen6_add_request,
- .get_seqno = ring_get_seqno,
+ .get_seqno = gen6_ring_get_seqno,
.irq_get = blt_ring_get_irq,
.irq_put = blt_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .cleanup = blt_ring_cleanup,
.sync_to = gen6_blt_ring_sync_to,
.semaphore_register = {MI_SEMAPHORE_SYNC_BR,
MI_SEMAPHORE_SYNC_BV,
ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_render_ring_get_irq;
ring->irq_put = gen6_render_ring_put_irq;
+ ring->get_seqno = gen6_ring_get_seqno;
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
ring->get_seqno = pc_render_get_seqno;
*ring = gen6_blt_ring;
- if (IS_GEN7(dev))
- ring->irq_get = gen7_blt_ring_get_irq;
-
return intel_init_ring_buffer(dev, ring);
}
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
- if (ret) {
- DRM_ERROR("failed to pin object\n");
+ if (ret)
goto out_unlock;
- }
intel_plane->obj = obj;
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
mutex_lock(&dev->struct_mutex);
}
- i915_gem_object_unpin(old_obj);
+ intel_unpin_fb_obj(old_obj);
}
out_unlock:
goto out;
mutex_lock(&dev->struct_mutex);
- i915_gem_object_unpin(intel_plane->obj);
+ intel_unpin_fb_obj(intel_plane->obj);
intel_plane->obj = NULL;
mutex_unlock(&dev->struct_mutex);
out:
unsigned long possible_crtcs;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
- DRM_ERROR("new plane code only for SNB+\n");
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
- }
intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
if (!intel_plane)