!Pdrivers/gpu/drm/i915/intel_csr.c csr support for dmc
!Idrivers/gpu/drm/i915/intel_csr.c
</sect2>
+ <sect2>
+ <title>Video BIOS Table (VBT)</title>
+!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
+!Idrivers/gpu/drm/i915/intel_bios.c
+!Idrivers/gpu/drm/i915/intel_bios.h
+ </sect2>
</sect1>
<sect1>
struct intel_engine_cs *ring;
u64 acthd[I915_NUM_RINGS];
u32 seqno[I915_NUM_RINGS];
- int i;
+ u32 instdone[I915_NUM_INSTDONE_REG];
+ int i, j;
if (!i915.enable_hangcheck) {
seq_printf(m, "Hangcheck disabled\n");
acthd[i] = intel_ring_get_active_head(ring);
}
+ i915_get_extra_instdone(dev, instdone);
+
intel_runtime_pm_put(dev_priv);
if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
(long long)ring->hangcheck.max_acthd);
seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+
+ if (ring->id == RCS) {
+ seq_puts(m, "\tinstdone read =");
+
+ for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
+ seq_printf(m, " 0x%08x", instdone[j]);
+
+ seq_puts(m, "\n\tinstdone accu =");
+
+ for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
+ seq_printf(m, " 0x%08x",
+ ring->hangcheck.instdone[j]);
+
+ seq_puts(m, "\n");
+ }
}
return 0;
seq_puts(m, "HW context ");
describe_ctx(m, ctx);
- for_each_ring(ring, dev_priv, i) {
- if (ring->default_context == ctx)
- seq_printf(m, "(default context %s) ",
- ring->name);
- }
+ if (ctx == dev_priv->kernel_context)
+ seq_printf(m, "(kernel context) ");
if (i915.enable_execlists) {
seq_putc(m, '\n');
}
static void i915_dump_lrc_obj(struct seq_file *m,
- struct intel_engine_cs *ring,
- struct drm_i915_gem_object *ctx_obj)
+ struct intel_context *ctx,
+ struct intel_engine_cs *ring)
{
struct page *page;
uint32_t *reg_state;
int j;
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
unsigned long ggtt_offset = 0;
if (ctx_obj == NULL) {
}
seq_printf(m, "CONTEXT: %s %u\n", ring->name,
- intel_execlists_ctx_id(ctx_obj));
+ intel_execlists_ctx_id(ctx, ring));
if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n");
if (ret)
return ret;
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
- for_each_ring(ring, dev_priv, i) {
- if (ring->default_context != ctx)
- i915_dump_lrc_obj(m, ring,
- ctx->engine[i].state);
- }
- }
+ list_for_each_entry(ctx, &dev_priv->context_list, link)
+ if (ctx != dev_priv->kernel_context)
+ for_each_ring(ring, dev_priv, i)
+ i915_dump_lrc_obj(m, ctx, ring);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
read_pointer = ring->next_context_status_buffer;
- write_pointer = status_pointer & 0x07;
+ write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
- write_pointer += 6;
+ write_pointer += GEN8_CSB_ENTRIES;
seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
read_pointer, write_pointer);
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
- struct drm_i915_gem_object *ctx_obj;
-
- ctx_obj = head_req->ctx->engine[ring_id].state;
seq_printf(m, "\tHead request id: %u\n",
- intel_execlists_ctx_id(ctx_obj));
+ intel_execlists_ctx_id(head_req->ctx, ring));
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
*/
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);
- intel_prepare_ddi(dev);
return 0;
}
return 0;
DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
- wait_for_on ? "on" : "off",
- I915_READ(VLV_GTLC_PW_STATUS));
+ onoff(wait_for_on),
+ I915_READ(VLV_GTLC_PW_STATUS));
/*
* RC6 transitioning can be delayed up to 2 msec (see
err = wait_for(COND, 3);
if (err)
DRM_ERROR("timeout waiting for GT wells to go %s\n",
- wait_for_on ? "on" : "off");
+ onoff(wait_for_on));
return err;
#undef COND
if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
return;
- DRM_ERROR("GT register access while GT waking disabled\n");
+ DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+
+ if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
+ DRM_ERROR("Unclaimed access detected prior to suspending\n");
+
dev_priv->pm.suspended = true;
/*
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
+ if (intel_uncore_unclaimed_mmio(dev_priv))
+ DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
intel_guc_resume(dev);
#include <uapi/drm/drm_fourcc.h>
#include <drm/drmP.h>
+#include "i915_params.h"
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20151218"
+#define DRIVER_DATE "20160124"
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
BUILD_BUG_ON(__i915_warn_cond); \
WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
-#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
+#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif
#undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
(long) (x), __func__);
*/
#define I915_STATE_WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) { \
- if (i915.verbose_state_checks) \
- WARN(1, format); \
- else \
+ if (unlikely(__ret_warn_on)) \
+ if (!WARN(i915.verbose_state_checks, format)) \
DRM_ERROR(format); \
- } \
unlikely(__ret_warn_on); \
})
-#define I915_STATE_WARN_ON(condition) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) { \
- if (i915.verbose_state_checks) \
- WARN(1, "WARN_ON(" #condition ")\n"); \
- else \
- DRM_ERROR("WARN_ON(" #condition ")\n"); \
- } \
- unlikely(__ret_warn_on); \
-})
+#define I915_STATE_WARN_ON(x) \
+ I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
static inline const char *yesno(bool v)
{
return v ? "yes" : "no";
}
+static inline const char *onoff(bool v)
+{
+ return v ? "on" : "off";
+}
+
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
unsigned boosts;
} rps;
- struct intel_engine_cs *bsd_ring;
+ unsigned int bsd_ring;
};
enum intel_dpll_id {
struct dpll *best_clock);
int (*compute_pipe_wm)(struct intel_crtc *crtc,
struct drm_atomic_state *state);
+ void (*program_watermarks)(struct intel_crtc_state *cstate);
void (*update_wm)(struct drm_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags);
- void (*update_primary_plane)(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
i915_reg_t reg_post;
u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT];
+
+ int unclaimed_mmio_check;
};
/* Iterate over initialised fw domains */
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
int pin_count;
+ struct i915_vma *lrc_vma;
+ u64 lrc_desc;
+ uint32_t *lrc_reg_state;
} engine[I915_NUM_RINGS];
struct list_head link;
bool busy;
/* the indicator for dispatch video commands on two BSD rings */
- int bsd_ring_dispatch_index;
+ unsigned int bsd_ring_dispatch_index;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
u8 seq_version;
u32 size;
u8 *data;
- u8 *sequence[MIPI_SEQ_MAX];
+ const u8 *sequence[MIPI_SEQ_MAX];
} dsi;
int crt_ddc_pin;
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_boot_cdclk;
- unsigned int cdclk_freq, max_cdclk_freq;
+ unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif
+ /* dpll and cdclk state is protected by connection_mutex */
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+
+ unsigned int active_crtcs;
+ unsigned int min_pixclk[I915_MAX_PIPES];
+
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
struct i915_workarounds workarounds;
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
+ struct intel_context *kernel_context;
+
bool edp_low_vswing;
/* perform PHY state sanity checks? */
};
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
- struct intel_context *ctx,
- struct drm_i915_gem_request **req_out);
+struct drm_i915_gem_request * __must_check
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+ struct intel_context *ctx);
void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+
+/* WaRsDisableCoarsePowerGating:skl,bxt */
+#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
+ ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
+ IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
* even when in MSI mode. This results in spurious interrupt warnings if the
extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);
-/* i915_params.c */
-struct i915_params {
- int modeset;
- int panel_ignore_lid;
- int semaphores;
- int lvds_channel_mode;
- int panel_use_ssc;
- int vbt_sdvo_panel_type;
- int enable_rc6;
- int enable_dc;
- int enable_fbc;
- int enable_ppgtt;
- int enable_execlists;
- int enable_psr;
- unsigned int preliminary_hw_support;
- int disable_power_well;
- int enable_ips;
- int invert_brightness;
- int enable_cmd_parser;
- /* leave bools at the end to not create holes */
- bool enable_hangcheck;
- bool fastboot;
- bool prefault_disable;
- bool load_detect_test;
- bool reset;
- bool disable_display;
- bool disable_vtd_wa;
- bool enable_guc_submission;
- int guc_log_level;
- int use_mmio_flip;
- int mmio_debug;
- bool verbose_state_checks;
- bool nuclear_pageflip;
- int edp_vswing;
-};
-extern struct i915_params i915 __read_mostly;
-
- /* i915_dma.c */
+/* i915_dma.c */
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
-extern void intel_uncore_check_errors(struct drm_device *dev);
+extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
+extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
- s64 before, now;
+ s64 before = 0; /* Only to silence a compiler warning. */
int ret;
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
return -ETIME;
timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+
+ /*
+ * Record current time in case interrupted by signal, or wedged.
+ */
+ before = ktime_get_raw_ns();
}
if (INTEL_INFO(dev_priv)->gen >= 6)
gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
- /* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(req);
- before = ktime_get_raw_ns();
/* Optimistic spin for the next jiffie before touching IRQs */
ret = __i915_spin_request(req, state);
finish_wait(&ring->irq_queue, &wait);
out:
- now = ktime_get_raw_ns();
trace_i915_gem_request_wait_end(req);
if (timeout) {
- s64 tres = *timeout - (now - before);
+ s64 tres = *timeout - (ktime_get_raw_ns() - before);
*timeout = tres < 0 ? 0 : tres;
i915_gem_request_remove_from_client(req);
if (ctx) {
- if (i915.enable_execlists) {
- if (ctx != req->ring->default_context)
- intel_lr_context_unpin(req);
- }
+ if (i915.enable_execlists && ctx != req->i915->kernel_context)
+ intel_lr_context_unpin(req);
i915_gem_context_unreference(ctx);
}
kmem_cache_free(req->i915->requests, req);
}
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
- struct intel_context *ctx,
- struct drm_i915_gem_request **req_out)
+static inline int
+__i915_gem_request_alloc(struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ struct drm_i915_gem_request **req_out)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct drm_i915_gem_request *req;
return ret;
}
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ * This can be NULL if the request is not directly related to
+ * any specific user context, in which case this function will
+ * choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+ struct intel_context *ctx)
+{
+ struct drm_i915_gem_request *req;
+ int err;
+
+ if (ctx == NULL)
+ ctx = to_i915(engine->dev)->kernel_context;
+ err = __i915_gem_request_alloc(engine, ctx, &req);
+ return err ? ERR_PTR(err) : req;
+}
+
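For reference, a hedged sketch of the calling convention this introduces, mirroring the callers converted later in this patch: the request now comes back ERR_PTR-encoded rather than through an out-parameter, and a NULL context makes the function pick the kernel context. "ring" below stands in for any initialised engine.

	/* Illustrative caller sketch only, not part of the patch. */
	struct drm_i915_gem_request *req;

	req = i915_gem_request_alloc(ring, NULL);	/* NULL => kernel context */
	if (IS_ERR(req))
		return PTR_ERR(req);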
void i915_gem_request_cancel(struct drm_i915_gem_request *req)
{
intel_ring_reserved_space_cancel(req->ringbuf);
return 0;
if (*to_req == NULL) {
- ret = i915_gem_request_alloc(to, to->default_context, to_req);
- if (ret)
- return ret;
+ struct drm_i915_gem_request *req;
+
+ req = i915_gem_request_alloc(to, NULL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ *to_req = req;
}
trace_i915_gem_ring_sync_to(*to_req, from, from_req);
if (!i915.enable_execlists) {
struct drm_i915_gem_request *req;
- ret = i915_gem_request_alloc(ring, ring->default_context, &req);
- if (ret)
- return ret;
+ req = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
ret = i915_switch_context(req);
if (ret) {
if (ret)
goto unref;
- BUILD_BUG_ON(I915_NUM_RINGS > 16);
- args->busy = obj->active << 16;
- if (obj->last_write_req)
- args->busy |= obj->last_write_req->ring->id;
+ args->busy = 0;
+ if (obj->active) {
+ int i;
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct drm_i915_gem_request *req;
+
+ req = obj->last_read_req[i];
+ if (req)
+ args->busy |= 1 << (16 + req->ring->exec_id);
+ }
+ if (obj->last_write_req)
+ args->busy |= obj->last_write_req->ring->exec_id;
+ }
unref:
drm_gem_object_unreference(&obj->base);
*/
init_unused_rings(dev);
- BUG_ON(!dev_priv->ring[RCS].default_context);
+ BUG_ON(!dev_priv->kernel_context);
ret = i915_ppgtt_init_hw(dev);
if (ret) {
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_request *req;
- WARN_ON(!ring->default_context);
-
- ret = i915_gem_request_alloc(ring, ring->default_context, &req);
- if (ret) {
+ req = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
+ file_priv->bsd_ring = -1;
+
ret = i915_gem_context_open(dev, file);
if (ret)
kfree(file_priv);
i915_gem_context_unreference(lctx);
ring->last_context = NULL;
}
-
- /* Force the GPU state to be reinitialised on enabling */
- if (ring->default_context)
- ring->default_context->legacy_hw_ctx.initialized = false;
}
+
+ /* Force the GPU state to be reinitialised on enabling */
+ dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
}
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
- int i;
/* Init should only be called once per module load. Eventually the
* restriction on the context_disabled check can be loosened. */
- if (WARN_ON(dev_priv->ring[RCS].default_context))
+ if (WARN_ON(dev_priv->kernel_context))
return 0;
if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
return PTR_ERR(ctx);
}
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_engine_cs *ring = &dev_priv->ring[i];
-
- /* NB: RCS will hold a ref for all rings */
- ring->default_context = ctx;
- }
+ dev_priv->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_context *dctx = dev_priv->ring[RCS].default_context;
+ struct intel_context *dctx = dev_priv->kernel_context;
int i;
if (dctx->legacy_hw_ctx.rcs_state) {
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = I915_NUM_RINGS; --i >= 0;) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
- if (ring->last_context)
+ if (ring->last_context) {
i915_gem_context_unreference(ring->last_context);
-
- ring->default_context = NULL;
- ring->last_context = NULL;
+ ring->last_context = NULL;
+ }
}
i915_gem_context_unreference(dctx);
+ dev_priv->kernel_context = NULL;
}
int i915_gem_context_enable(struct drm_i915_gem_request *req)
return eb->lut[handle];
} else {
struct hlist_head *head;
- struct hlist_node *node;
+ struct i915_vma *vma;
head = &eb->buckets[handle & eb->and];
- hlist_for_each(node, head) {
- struct i915_vma *vma;
-
- vma = hlist_entry(node, struct i915_vma, exec_node);
+ hlist_for_each_entry(vma, head, exec_node) {
if (vma->exec_handle == handle)
return vma;
}
exec_start = params->batch_obj_vm_offset +
params->args_batch_start_offset;
+ if (exec_len == 0)
+ exec_len = params->batch_obj->base.size;
+
ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
params->dispatch_flags);
/**
* Find one BSD ring to dispatch the corresponding BSD command.
- * The Ring ID is returned.
+ * The ring index is returned.
*/
-static int gen8_dispatch_bsd_ring(struct drm_device *dev,
- struct drm_file *file)
+static unsigned int
+gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
- /* Check whether the file_priv is using one ring */
- if (file_priv->bsd_ring)
- return file_priv->bsd_ring->id;
- else {
- /* If no, use the ping-pong mechanism to select one ring */
- int ring_id;
-
- mutex_lock(&dev->struct_mutex);
- if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
- ring_id = VCS;
- dev_priv->mm.bsd_ring_dispatch_index = 1;
- } else {
- ring_id = VCS2;
- dev_priv->mm.bsd_ring_dispatch_index = 0;
- }
- file_priv->bsd_ring = &dev_priv->ring[ring_id];
- mutex_unlock(&dev->struct_mutex);
- return ring_id;
+ /* Check whether the file_priv has already selected one ring. */
+ if ((int)file_priv->bsd_ring < 0) {
+ /* If not, use the ping-pong mechanism to select one. */
+ mutex_lock(&dev_priv->dev->struct_mutex);
+ file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
+ dev_priv->mm.bsd_ring_dispatch_index ^= 1;
+ mutex_unlock(&dev_priv->dev->struct_mutex);
}
+
+ return file_priv->bsd_ring;
}
static struct drm_i915_gem_object *
return vma->obj;
}
+#define I915_USER_RINGS (4)
+
+static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+ [I915_EXEC_DEFAULT] = RCS,
+ [I915_EXEC_RENDER] = RCS,
+ [I915_EXEC_BLT] = BCS,
+ [I915_EXEC_BSD] = VCS,
+ [I915_EXEC_VEBOX] = VECS
+};
+
+static int
+eb_select_ring(struct drm_i915_private *dev_priv,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct intel_engine_cs **ring)
+{
+ unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
+
+ if (user_ring_id > I915_USER_RINGS) {
+ DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+ return -EINVAL;
+ }
+
+ if ((user_ring_id != I915_EXEC_BSD) &&
+ ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+ DRM_DEBUG("execbuf with non bsd ring but with invalid "
+ "bsd dispatch flags: %d\n", (int)(args->flags));
+ return -EINVAL;
+ }
+
+ if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
+ unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
+
+ if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
+ bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
+ } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
+ bsd_idx <= I915_EXEC_BSD_RING2) {
+ bsd_idx--;
+ } else {
+ DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
+ bsd_idx);
+ return -EINVAL;
+ }
+
+ *ring = &dev_priv->ring[_VCS(bsd_idx)];
+ } else {
+ *ring = &dev_priv->ring[user_ring_map[user_ring_id]];
+ }
+
+ if (!intel_ring_initialized(*ring)) {
+ DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *req = NULL;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;
- if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
- DRM_DEBUG("execbuf with unknown ring: %d\n",
- (int)(args->flags & I915_EXEC_RING_MASK));
- return -EINVAL;
- }
-
- if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
- ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
- DRM_DEBUG("execbuf with non bsd ring but with invalid "
- "bsd dispatch flags: %d\n", (int)(args->flags));
- return -EINVAL;
- }
-
- if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
- ring = &dev_priv->ring[RCS];
- else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
- if (HAS_BSD2(dev)) {
- int ring_id;
-
- switch (args->flags & I915_EXEC_BSD_MASK) {
- case I915_EXEC_BSD_DEFAULT:
- ring_id = gen8_dispatch_bsd_ring(dev, file);
- ring = &dev_priv->ring[ring_id];
- break;
- case I915_EXEC_BSD_RING1:
- ring = &dev_priv->ring[VCS];
- break;
- case I915_EXEC_BSD_RING2:
- ring = &dev_priv->ring[VCS2];
- break;
- default:
- DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
- (int)(args->flags & I915_EXEC_BSD_MASK));
- return -EINVAL;
- }
- } else
- ring = &dev_priv->ring[VCS];
- } else
- ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
-
- if (!intel_ring_initialized(ring)) {
- DRM_DEBUG("execbuf with invalid ring: %d\n",
- (int)(args->flags & I915_EXEC_RING_MASK));
- return -EINVAL;
- }
+ ret = eb_select_ring(dev_priv, file, args, &ring);
+ if (ret)
+ return ret;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
/* Allocate a request for this batch buffer nice and early. */
- ret = i915_gem_request_alloc(ring, ctx, ¶ms->request);
- if (ret)
+ req = i915_gem_request_alloc(ring, ctx);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
goto err_batch_unpin;
+ }
- ret = i915_gem_request_add_to_client(params->request, file);
+ ret = i915_gem_request_add_to_client(req, file);
if (ret)
goto err_batch_unpin;
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;
+ params->request = req;
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
* must be freed again. If it was submitted then it is being tracked
* on the active request list and no clean up is required here.
*/
- if (ret && params->request)
- i915_gem_request_cancel(params->request);
+ if (ret && req)
+ i915_gem_request_cancel(req);
mutex_unlock(&dev->struct_mutex);
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
-const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_normal = {
+ .type = I915_GGTT_VIEW_NORMAL,
+};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
- .type = I915_GGTT_VIEW_ROTATED
+ .type = I915_GGTT_VIEW_ROTATED,
};
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
}
static struct scatterlist *
-rotate_pages(dma_addr_t *in, unsigned int offset,
+rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int width, unsigned int height,
struct sg_table *st, struct scatterlist *sg)
{
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#endif
}
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ int count = 0;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (drm_mm_node_allocated(&vma->node))
+ count++;
+ if (vma->pin_count)
+ count++;
+ }
+
+ return count;
+}
+
+static bool swap_available(void)
+{
+ return get_nr_swap_pages() > 0;
+}
+
+static bool can_release_pages(struct drm_i915_gem_object *obj)
+{
+ /* Only report true if by unbinding the object and putting its pages
+ * we can actually make forward progress towards freeing physical
+ * pages.
+ *
+ * If the pages are pinned for any other reason than being bound
+ * to the GPU, simply unbinding from the GPU is not going to succeed
+ * in releasing our pin count on the pages themselves.
+ */
+ if (obj->pages_pin_count != num_vma_bound(obj))
+ return false;
+
+ /* We can only return physical pages to the system if we can either
+ * discard the contents (because the user has marked them as being
+ * purgeable) or if we can move their contents out to swap.
+ */
+ return swap_available() || obj->madv == I915_MADV_DONTNEED;
+}
+
/**
* i915_gem_shrink - Shrink buffer object caches
* @dev_priv: i915 device
if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
continue;
+ if (!can_release_pages(obj))
+ continue;
+
drm_gem_object_reference(&obj->base);
/* For the unbound phase, this should be a no-op! */
return true;
}
-static int num_vma_bound(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
- int count = 0;
-
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
- if (drm_mm_node_allocated(&vma->node))
- count++;
- if (vma->pin_count)
- count++;
- }
-
- return count;
-}
-
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
+ if (!obj->active && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
}
if (obj->pages == NULL)
goto cleanup;
+ obj->get_page.sg = obj->pages->sgl;
+ obj->get_page.last = 0;
+
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
if (request)
rbuf = request->ctx->engine[ring->id].ringbuf;
else
- rbuf = ring->default_context->engine[ring->id].ringbuf;
+ rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
} else
rbuf = ring->buffer;
#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
+#define SOFT_SCRATCH_COUNT 16
#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
#define UOS_RSA_SCRATCH_MAX_COUNT 64
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
- if (!intel_enable_rc6(dev_priv->dev) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
- (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
- (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
+ if (!intel_enable_rc6(dev) ||
+ NEEDS_WaRsDisableCoarsePowerGating(dev))
data[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
db_exc.cookie = 1;
}
+ /* Finally, update the cached copy of the GuC's WQ head */
+ gc->wq_head = desc->head;
+
kunmap_atomic(base);
return ret;
}
sizeof(desc) * client->ctx_index);
}
-/* Get valid workqueue item and return it back to offset */
-static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
+int i915_guc_wq_check_space(struct i915_guc_client *gc)
{
struct guc_process_desc *desc;
void *base;
u32 size = sizeof(struct guc_wq_item);
int ret = -ETIMEDOUT, timeout_counter = 200;
+ if (!gc)
+ return 0;
+
+ /* Quickly return if wq space is available since the last time we
+ * cached the head position. */
+ if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
+ return 0;
+
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
while (timeout_counter-- > 0) {
- if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
- *offset = gc->wq_tail;
-
- /* advance the tail for next workqueue item */
- gc->wq_tail += size;
- gc->wq_tail &= gc->wq_size - 1;
+ gc->wq_head = desc->head;
- /* this will break the loop */
- timeout_counter = 0;
+ if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
ret = 0;
+ break;
}
if (timeout_counter)
enum intel_ring_id ring_id = rq->ring->id;
struct guc_wq_item *wqi;
void *base;
- u32 tail, wq_len, wq_off = 0;
- int ret;
+ u32 tail, wq_len, wq_off, space;
+
+ space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+ if (WARN_ON(space < sizeof(struct guc_wq_item)))
+ return -ENOSPC; /* shouldn't happen */
- ret = guc_get_workqueue_space(gc, &wq_off);
- if (ret)
- return ret;
+ /* postincrement WQ tail for next time */
+ wq_off = gc->wq_tail;
+ gc->wq_tail += sizeof(struct guc_wq_item);
+ gc->wq_tail &= gc->wq_size - 1;
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
* should not have the case where structure wqi is across page, neither
guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}
+static void init_guc_policies(struct guc_policies *policies)
+{
+ struct guc_policy *policy;
+ u32 p, i;
+
+ policies->dpc_promote_time = 500000;
+ policies->max_num_work_items = POLICY_MAX_NUM_WI;
+
+ for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ policy = &policies->policy[p][i];
+
+ policy->execution_quantum = 1000000;
+ policy->preemption_time = 500000;
+ policy->fault_time = 250000;
+ policy->policy_flags = 0;
+ }
+ }
+
+ policies->is_valid = 1;
+}
+
+static void guc_create_ads(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_gem_object *obj;
+ struct guc_ads *ads;
+ struct guc_policies *policies;
+ struct guc_mmio_reg_state *reg_state;
+ struct intel_engine_cs *ring;
+ struct page *page;
+ u32 size, i;
+
+ /* The ads obj includes the struct itself and buffers passed to GuC */
+ size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
+ sizeof(struct guc_mmio_reg_state) +
+ GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
+
+ obj = guc->ads_obj;
+ if (!obj) {
+ obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
+ if (!obj)
+ return;
+
+ guc->ads_obj = obj;
+ }
+
+ page = i915_gem_object_get_page(obj, 0);
+ ads = kmap(page);
+
+ /*
+ * The GuC requires a "Golden Context" when it reinitialises
+ * engines after a reset. Here we use the Render ring default
+ * context, which must already exist and be pinned in the GGTT,
+ * so its address won't change after we've told the GuC where
+ * to find it.
+ */
+ ring = &dev_priv->ring[RCS];
+ ads->golden_context_lrca = ring->status_page.gfx_addr;
+
+ for_each_ring(ring, dev_priv, i)
+ ads->eng_state_size[i] = intel_lr_context_size(ring);
+
+ /* GuC scheduling policies */
+ policies = (void *)ads + sizeof(struct guc_ads);
+ init_guc_policies(policies);
+
+ ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
+ sizeof(struct guc_ads);
+
+ /* MMIO reg state */
+ reg_state = (void *)policies + sizeof(struct guc_policies);
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ reg_state->mmio_white_list[i].mmio_start =
+ dev_priv->ring[i].mmio_base + GUC_MMIO_WHITE_LIST_START;
+
+ /* Nothing to be saved or restored for now. */
+ reg_state->mmio_white_list[i].count = 0;
+ }
+
+ ads->reg_state_addr = ads->scheduler_policies +
+ sizeof(struct guc_policies);
+
+ ads->reg_state_buffer = ads->reg_state_addr +
+ sizeof(struct guc_mmio_reg_state);
+
+ kunmap(page);
+}
+
/*
* Set up the memory resources to be shared with the GuC. At this point,
* we require just one object that can be mapped through the GGTT.
guc_create_log(guc);
+ guc_create_ads(guc);
+
return 0;
}
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
- struct intel_context *ctx = dev_priv->ring[RCS].default_context;
+ struct intel_context *ctx = dev_priv->kernel_context;
struct i915_guc_client *client;
/* client for execbuf submission */
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
+ gem_release_guc_obj(dev_priv->guc.ads_obj);
+ guc->ads_obj = NULL;
+
gem_release_guc_obj(dev_priv->guc.log_obj);
guc->log_obj = NULL;
if (!i915.enable_guc_submission)
return 0;
- ctx = dev_priv->ring[RCS].default_context;
+ ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
if (!i915.enable_guc_submission)
return 0;
- ctx = dev_priv->ring[RCS].default_context;
+ ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- /* We get interrupts on unclaimed registers, so check for this before we
- * do any I915_{READ,WRITE}. */
- intel_uncore_check_errors(dev);
-
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
-static irqreturn_t gen8_irq_handler(int irq, void *arg)
+static irqreturn_t
+gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 master_ctl;
+ struct drm_device *dev = dev_priv->dev;
irqreturn_t ret = IRQ_NONE;
- uint32_t tmp = 0;
+ u32 iir;
enum pipe pipe;
- u32 aux_mask = GEN8_AUX_CHANNEL_A;
-
- if (!intel_irqs_enabled(dev_priv))
- return IRQ_NONE;
-
- /* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
-
- if (INTEL_INFO(dev_priv)->gen >= 9)
- aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
- GEN9_AUX_CHANNEL_D;
-
- master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
- master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
- if (!master_ctl)
- goto out;
-
- I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
-
- /* Find, clear, then process each source of interrupt */
-
- ret = gen8_gt_irq_handler(dev_priv, master_ctl);
if (master_ctl & GEN8_DE_MISC_IRQ) {
- tmp = I915_READ(GEN8_DE_MISC_IIR);
- if (tmp) {
- I915_WRITE(GEN8_DE_MISC_IIR, tmp);
+ iir = I915_READ(GEN8_DE_MISC_IIR);
+ if (iir) {
+ I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
- if (tmp & GEN8_DE_MISC_GSE)
+ if (iir & GEN8_DE_MISC_GSE)
intel_opregion_asle_intr(dev);
else
DRM_ERROR("Unexpected DE Misc interrupt\n");
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
- tmp = I915_READ(GEN8_DE_PORT_IIR);
- if (tmp) {
+ iir = I915_READ(GEN8_DE_PORT_IIR);
+ if (iir) {
+ u32 tmp_mask;
bool found = false;
- u32 hotplug_trigger = 0;
-
- if (IS_BROXTON(dev_priv))
- hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
- else if (IS_BROADWELL(dev_priv))
- hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
- I915_WRITE(GEN8_DE_PORT_IIR, tmp);
+ I915_WRITE(GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
- if (tmp & aux_mask) {
+ tmp_mask = GEN8_AUX_CHANNEL_A;
+ if (INTEL_INFO(dev_priv)->gen >= 9)
+ tmp_mask |= GEN9_AUX_CHANNEL_B |
+ GEN9_AUX_CHANNEL_C |
+ GEN9_AUX_CHANNEL_D;
+
+ if (iir & tmp_mask) {
dp_aux_irq_handler(dev);
found = true;
}
- if (hotplug_trigger) {
- if (IS_BROXTON(dev))
- bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
- else
- ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
- found = true;
+ if (IS_BROXTON(dev_priv)) {
+ tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
+ if (tmp_mask) {
+ bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
+ found = true;
+ }
+ } else if (IS_BROADWELL(dev_priv)) {
+ tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
+ if (tmp_mask) {
+ ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
+ found = true;
+ }
}
- if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
+ if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
gmbus_irq_handler(dev);
found = true;
}
}
for_each_pipe(dev_priv, pipe) {
- uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
+ u32 flip_done, fault_errors;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
- pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
- if (pipe_iir) {
- ret = IRQ_HANDLED;
- I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+ iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+ if (!iir) {
+ DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+ continue;
+ }
- if (pipe_iir & GEN8_PIPE_VBLANK &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ ret = IRQ_HANDLED;
+ I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
- if (INTEL_INFO(dev_priv)->gen >= 9)
- flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
- else
- flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
+ if (iir & GEN8_PIPE_VBLANK &&
+ intel_pipe_handle_vblank(dev, pipe))
+ intel_check_page_flip(dev, pipe);
- if (flip_done) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ flip_done = iir;
+ if (INTEL_INFO(dev_priv)->gen >= 9)
+ flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
+ else
+ flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
- if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev, pipe);
+ if (flip_done) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
+ }
- if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
- intel_cpu_fifo_underrun_irq_handler(dev_priv,
- pipe);
+ if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
+ hsw_pipe_crc_irq_handler(dev, pipe);
+ if (iir & GEN8_PIPE_FIFO_UNDERRUN)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
- if (INTEL_INFO(dev_priv)->gen >= 9)
- fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
- else
- fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+ fault_errors = iir;
+ if (INTEL_INFO(dev_priv)->gen >= 9)
+ fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ else
+ fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
- if (fault_errors)
- DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
- pipe_name(pipe),
- pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
- } else
- DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+ if (fault_errors)
+ DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+ pipe_name(pipe),
+ fault_errors);
}
if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- u32 pch_iir = I915_READ(SDEIIR);
- if (pch_iir) {
- I915_WRITE(SDEIIR, pch_iir);
+ iir = I915_READ(SDEIIR);
+ if (iir) {
+ I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
if (HAS_PCH_SPT(dev_priv))
- spt_irq_handler(dev, pch_iir);
+ spt_irq_handler(dev, iir);
else
- cpt_irq_handler(dev, pch_iir);
+ cpt_irq_handler(dev, iir);
} else {
/*
* Like on previous PCH there seems to be something
}
}
+ return ret;
+}
+
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 master_ctl;
+ irqreturn_t ret;
+
+ if (!intel_irqs_enabled(dev_priv))
+ return IRQ_NONE;
+
+ master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
+ master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+ if (!master_ctl)
+ return IRQ_NONE;
+
+ I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
+
+ /* IRQs are synced during runtime_suspend, we don't require a wakeref */
+ disable_rpm_wakeref_asserts(dev_priv);
+
+ /* Find, clear, then process each source of interrupt */
+ ret = gen8_gt_irq_handler(dev_priv, master_ctl);
+ ret |= gen8_de_irq_handler(dev_priv, master_ctl);
+
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ_FW(GEN8_MASTER_IRQ);
-out:
enable_rpm_wakeref_asserts(dev_priv);
return ret;
ring->hangcheck.deadlock = 0;
}
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+static bool subunits_stuck(struct intel_engine_cs *ring)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 tmp;
+ u32 instdone[I915_NUM_INSTDONE_REG];
+ bool stuck;
+ int i;
+
+ if (ring->id != RCS)
+ return true;
+
+ i915_get_extra_instdone(ring->dev, instdone);
+
+ /* There might be unstable subunit states even when
+ * actual head is not moving. Filter out the unstable ones by
+ * accumulating the undone -> done transitions and only
+ * consider those as progress.
+ */
+ stuck = true;
+ for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
+ const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
+
+ if (tmp != ring->hangcheck.instdone[i])
+ stuck = false;
+
+ ring->hangcheck.instdone[i] |= tmp;
+ }
+ return stuck;
+}
+
+static enum intel_ring_hangcheck_action
+head_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
if (acthd != ring->hangcheck.acthd) {
+
+ /* Clear subunit states on head movement */
+ memset(ring->hangcheck.instdone, 0,
+ sizeof(ring->hangcheck.instdone));
+
if (acthd > ring->hangcheck.max_acthd) {
ring->hangcheck.max_acthd = acthd;
return HANGCHECK_ACTIVE;
return HANGCHECK_ACTIVE_LOOP;
}
+ if (!subunits_stuck(ring))
+ return HANGCHECK_ACTIVE;
+
+ return HANGCHECK_HUNG;
+}
+
+static enum intel_ring_hangcheck_action
+ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_ring_hangcheck_action ha;
+ u32 tmp;
+
+ ha = head_stuck(ring, acthd);
+ if (ha != HANGCHECK_HUNG)
+ return ha;
+
if (IS_GEN2(dev))
return HANGCHECK_HUNG;
*/
DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
+ /* As enabling the GPU requires fairly extensive mmio access,
+ * periodically arm the mmio checker to see if we are triggering
+ * any invalid access.
+ */
+ intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
for_each_ring(ring, dev_priv, i) {
u64 acthd;
u32 seqno;
if (ring->hangcheck.score > 0)
ring->hangcheck.score--;
+ /* Clear head and subunit states on seqno movement */
ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
+
+ memset(ring->hangcheck.instdone, 0,
+ sizeof(ring->hangcheck.instdone));
}
ring->hangcheck.seqno = seqno;
* IN THE SOFTWARE.
*/
+#include "i915_params.h"
#include "i915_drv.h"
struct i915_params i915 __read_mostly = {
--- /dev/null
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_PARAMS_H_
+#define _I915_PARAMS_H_
+
+#include <linux/cache.h> /* for __read_mostly */
+
+struct i915_params {
+ int modeset;
+ int panel_ignore_lid;
+ int semaphores;
+ int lvds_channel_mode;
+ int panel_use_ssc;
+ int vbt_sdvo_panel_type;
+ int enable_rc6;
+ int enable_dc;
+ int enable_fbc;
+ int enable_ppgtt;
+ int enable_execlists;
+ int enable_psr;
+ unsigned int preliminary_hw_support;
+ int disable_power_well;
+ int enable_ips;
+ int invert_brightness;
+ int enable_cmd_parser;
+ int guc_log_level;
+ int use_mmio_flip;
+ int mmio_debug;
+ int edp_vswing;
+ /* leave bools at the end to not create holes */
+ bool enable_hangcheck;
+ bool fastboot;
+ bool prefault_disable;
+ bool load_detect_test;
+ bool reset;
+ bool disable_display;
+ bool disable_vtd_wa;
+ bool enable_guc_submission;
+ bool verbose_state_checks;
+ bool nuclear_pageflip;
+};
+
+extern struct i915_params i915 __read_mostly;
+
+#endif
+
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1<<31)
+#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
+#define CLAIM_ER_CLR (1 << 31)
+#define CLAIM_ER_OVERFLOW (1 << 16)
+#define CLAIM_ER_CTR_MASK 0xffff
+
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
#define DERRMR_PIPEA_SCANLINE (1<<0)
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
loff_t off, size_t count)
{
- struct device *kdev = container_of(kobj, struct device, kobj);
+ struct device *kdev = kobj_to_dev(kobj);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct i915_error_state_file_priv error_priv;
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
- struct device *kdev = container_of(kobj, struct device, kobj);
+ struct device *kdev = kobj_to_dev(kobj);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
int ret;
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
drm_atomic_state_default_clear(&state->base);
- state->dpll_set = false;
+ state->dpll_set = state->modeset = false;
}
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
intel_state->clip.x2 =
- crtc_state->base.active ? crtc_state->pipe_src_w : 0;
+ crtc_state->base.enable ? crtc_state->pipe_src_w : 0;
intel_state->clip.y2 =
- crtc_state->base.active ? crtc_state->pipe_src_h : 0;
+ crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
if (state->fb && intel_rotation_90_or_270(state->rotation)) {
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane->state);
+ struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_existing_crtc_state(old_state->state, crtc);
- intel_plane->commit_plane(plane, intel_state);
+ if (intel_state->visible)
+ intel_plane->update_plane(plane,
+ to_intel_crtc_state(crtc_state),
+ intel_state);
+ else
+ intel_plane->disable_plane(plane, crtc);
}
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
#include "i915_drv.h"
#include "intel_bios.h"
+/**
+ * DOC: Video BIOS Table (VBT)
+ *
+ * The Video BIOS Table, or VBT, provides platform and board specific
+ * configuration information to the driver that is not discoverable or available
+ * through other means. The configuration is mostly related to display
+ * hardware. The VBT is available via the ACPI OpRegion or, on older systems, in
+ * the PCI ROM.
+ *
+ * The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB
+ * Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that
+ * contain the actual configuration information. The VBT Header, and thus the
+ * VBT, begins with "$VBT" signature. The VBT Header contains the offset of the
+ * BDB Header. The data blocks are concatenated after the BDB Header. The data
+ * blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes of
+ * data. (Block 53, the MIPI Sequence Block, is an exception.)
+ *
+ * The driver parses the VBT during load. The relevant information is stored in
+ * driver private data for ease of use, and the actual VBT is not read after
+ * that.
+ */
+
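To make the block layout described above concrete, here is a minimal sketch of a BDB walk. It is illustrative only and not part of the patch: the struct vbt_header and struct bdb_header field names (bdb_offset, header_size, bdb_size) are assumed from intel_bios.h, sketch_find_block() is a hypothetical helper, and the MIPI Sequence Block v3+ size exception handled by _get_blocksize() below is ignored.

	/* Sketch: walk the concatenated BDB blocks looking for a Block ID.
	 * Each block is a 1-byte ID, a 2-byte size, then 'size' bytes of data.
	 */
	static const void *sketch_find_block(const void *vbt, u8 wanted_id)
	{
		const struct vbt_header *vbt_hdr = vbt;
		const struct bdb_header *bdb = vbt + vbt_hdr->bdb_offset;
		const u8 *base = (const u8 *)bdb;
		u32 index = bdb->header_size;

		while (index + 3 < bdb->bdb_size) {
			u8 id = base[index];
			u16 size = *(const u16 *)(base + index + 1);

			if (id == wanted_id)
				return base + index + 3;

			index += 3 + size;
		}

		return NULL;
	}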
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
static int panel_type;
+/* Get BDB block size given a pointer to Block ID. */
+static u32 _get_blocksize(const u8 *block_base)
+{
+ /* The MIPI Sequence Block v3+ has a separate size field. */
+ if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3)
+ return *((const u32 *)(block_base + 4));
+ else
+ return *((const u16 *)(block_base + 1));
+}
+
+/* Get BDB block size given a pointer to data after Block ID and Block Size. */
+static u32 get_blocksize(const void *block_data)
+{
+ return _get_blocksize(block_data - 3);
+}
+
static const void *
find_section(const void *_bdb, int section_id)
{
/* walk the sections looking for section_id */
while (index + 3 < total) {
current_id = *(base + index);
- index++;
-
- current_size = *((const u16 *)(base + index));
- index += 2;
-
- /* The MIPI Sequence Block v3+ has a separate size field. */
- if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
- current_size = *((const u32 *)(base + index + 1));
+ current_size = _get_blocksize(base + index);
+ index += 3;
if (index + current_size > total)
return NULL;
return NULL;
}
-static u16
-get_blocksize(const void *p)
-{
- u16 *block_ptr, block_size;
-
- block_ptr = (u16 *)((char *)p - 2);
- block_size = *block_ptr;
- return block_size;
-}
-
static void
fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
const struct lvds_dvo_timing *dvo_timing)
dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
}
-static u8 *goto_next_sequence(u8 *data, int *size)
-{
- u16 len;
- int tmp = *size;
-
- if (--tmp < 0)
- return NULL;
-
- /* goto first element */
- data++;
- while (1) {
- switch (*data) {
- case MIPI_SEQ_ELEM_SEND_PKT:
- /*
- * skip by this element payload size
- * skip elem id, command flag and data type
- */
- tmp -= 5;
- if (tmp < 0)
- return NULL;
-
- data += 3;
- len = *((u16 *)data);
-
- tmp -= len;
- if (tmp < 0)
- return NULL;
-
- /* skip by len */
- data = data + 2 + len;
- break;
- case MIPI_SEQ_ELEM_DELAY:
- /* skip by elem id, and delay is 4 bytes */
- tmp -= 5;
- if (tmp < 0)
- return NULL;
-
- data += 5;
- break;
- case MIPI_SEQ_ELEM_GPIO:
- tmp -= 3;
- if (tmp < 0)
- return NULL;
-
- data += 3;
- break;
- default:
- DRM_ERROR("Unknown element\n");
- return NULL;
- }
-
- /* end of sequence ? */
- if (*data == 0)
- break;
- }
-
- /* goto next sequence or end of block byte */
- if (--tmp < 0)
- return NULL;
-
- data++;
-
- /* update amount of data left for the sequence block to be parsed */
- *size = tmp;
- return data;
-}
-
static void
-parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+parse_mipi_config(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
{
const struct bdb_mipi_config *start;
- const struct bdb_mipi_sequence *sequence;
const struct mipi_config *config;
const struct mipi_pps_data *pps;
- u8 *data;
- const u8 *seq_data;
- int i, panel_id, seq_size;
- u16 block_size;
/* parse MIPI blocks only if LFP type is MIPI */
if (!dev_priv->vbt.has_mipi)
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+}
- /* Check if we have sequence block as well */
- sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
- if (!sequence) {
- DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
- return;
+/* Find the sequence block and size for the given panel. */
+static const u8 *
+find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
+ u16 panel_id, u32 *seq_size)
+{
+ u32 total = get_blocksize(sequence);
+ const u8 *data = &sequence->data[0];
+ u8 current_id;
+ u32 current_size;
+ int header_size = sequence->version >= 3 ? 5 : 3;
+ int index = 0;
+ int i;
+
+ /* skip new block size */
+ if (sequence->version >= 3)
+ data += 4;
+
+ for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
+ if (index + header_size > total) {
+ DRM_ERROR("Invalid sequence block (header)\n");
+ return NULL;
+ }
+
+ current_id = *(data + index);
+ if (sequence->version >= 3)
+ current_size = *((const u32 *)(data + index + 1));
+ else
+ current_size = *((const u16 *)(data + index + 1));
+
+ index += header_size;
+
+ if (index + current_size > total) {
+ DRM_ERROR("Invalid sequence block\n");
+ return NULL;
+ }
+
+ if (current_id == panel_id) {
+ *seq_size = current_size;
+ return data + index;
+ }
+
+ index += current_size;
}
- /* Fail gracefully for forward incompatible sequence block. */
- if (sequence->version >= 3) {
- DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
- return;
+ DRM_ERROR("Sequence block detected but no valid configuration\n");
+
+ return NULL;
+}
+
+static int goto_next_sequence(const u8 *data, int index, int total)
+{
+ u16 len;
+
+ /* Skip Sequence Byte. */
+ for (index = index + 1; index < total; index += len) {
+ u8 operation_byte = *(data + index);
+ index++;
+
+ switch (operation_byte) {
+ case MIPI_SEQ_ELEM_END:
+ return index;
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ if (index + 4 > total)
+ return 0;
+
+ len = *((const u16 *)(data + index + 2)) + 4;
+ break;
+ case MIPI_SEQ_ELEM_DELAY:
+ len = 4;
+ break;
+ case MIPI_SEQ_ELEM_GPIO:
+ len = 2;
+ break;
+ case MIPI_SEQ_ELEM_I2C:
+ if (index + 7 > total)
+ return 0;
+ len = *(data + index + 6) + 7;
+ break;
+ default:
+ DRM_ERROR("Unknown operation byte\n");
+ return 0;
+ }
}
- DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
+ return 0;
+}
- block_size = get_blocksize(sequence);
+static int goto_next_sequence_v3(const u8 *data, int index, int total)
+{
+ int seq_end;
+ u16 len;
+ u32 size_of_sequence;
/*
- * parse the sequence block for individual sequences
+ * Could skip sequence based on Size of Sequence alone, but also do some
+ * checking on the structure.
*/
- dev_priv->vbt.dsi.seq_version = sequence->version;
+ if (total < 5) {
+ DRM_ERROR("Too small sequence size\n");
+ return 0;
+ }
- seq_data = &sequence->data[0];
+ /* Skip Sequence Byte. */
+ index++;
/*
- * sequence block is variable length and hence we need to parse and
- * get the sequence data for specific panel id
+ * Size of Sequence. Excludes the Sequence Byte and the size itself,
+ * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
+ * byte.
*/
- for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
- panel_id = *seq_data;
- seq_size = *((u16 *) (seq_data + 1));
- if (panel_id == panel_type)
- break;
+ size_of_sequence = *((const uint32_t *)(data + index));
+ index += 4;
- /* skip the sequence including seq header of 3 bytes */
- seq_data = seq_data + 3 + seq_size;
- if ((seq_data - &sequence->data[0]) > block_size) {
- DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
- return;
+ seq_end = index + size_of_sequence;
+ if (seq_end > total) {
+ DRM_ERROR("Invalid sequence size\n");
+ return 0;
+ }
+
+ for (; index < total; index += len) {
+ u8 operation_byte = *(data + index);
+ index++;
+
+ if (operation_byte == MIPI_SEQ_ELEM_END) {
+ if (index != seq_end) {
+ DRM_ERROR("Invalid element structure\n");
+ return 0;
+ }
+ return index;
+ }
+
+ len = *(data + index);
+ index++;
+
+ /*
+ * FIXME: Would be nice to check elements like for v1/v2 in
+ * goto_next_sequence() above.
+ */
+ switch (operation_byte) {
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ case MIPI_SEQ_ELEM_DELAY:
+ case MIPI_SEQ_ELEM_GPIO:
+ case MIPI_SEQ_ELEM_I2C:
+ case MIPI_SEQ_ELEM_SPI:
+ case MIPI_SEQ_ELEM_PMIC:
+ break;
+ default:
+ DRM_ERROR("Unknown operation byte %u\n",
+ operation_byte);
+ break;
}
}
- if (i == MAX_MIPI_CONFIGURATIONS) {
- DRM_ERROR("Sequence block detected but no valid configuration\n");
+ return 0;
+}
+
+static void
+parse_mipi_sequence(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_mipi_sequence *sequence;
+ const u8 *seq_data;
+ u32 seq_size;
+ u8 *data;
+ int index = 0;
+
+ /* Only our generic panel driver uses the sequence block. */
+ if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
+ return;
+
+ sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
+ if (!sequence) {
+ DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
return;
}
- /* check if found sequence is completely within the sequence block
- * just being paranoid */
- if (seq_size > block_size) {
- DRM_ERROR("Corrupted sequence/size, bailing out\n");
+ /* Fail gracefully for forward incompatible sequence block. */
+ if (sequence->version >= 4) {
+ DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
+ sequence->version);
return;
}
- /* skip the panel id(1 byte) and seq size(2 bytes) */
- dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
- if (!dev_priv->vbt.dsi.data)
+ DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
+
+ seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
+ if (!seq_data)
return;
- /*
- * loop into the sequence data and split into multiple sequneces
- * There are only 5 types of sequences as of now
- */
- data = dev_priv->vbt.dsi.data;
- dev_priv->vbt.dsi.size = seq_size;
+ data = kmemdup(seq_data, seq_size, GFP_KERNEL);
+ if (!data)
+ return;
- /* two consecutive 0x00 indicate end of all sequences */
- while (1) {
- int seq_id = *data;
- if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
- dev_priv->vbt.dsi.sequence[seq_id] = data;
- DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
- } else {
- DRM_ERROR("undefined sequence\n");
+ /* Parse the sequences, store pointers to each sequence. */
+ for (;;) {
+ u8 seq_id = *(data + index);
+ if (seq_id == MIPI_SEQ_END)
+ break;
+
+ if (seq_id >= MIPI_SEQ_MAX) {
+ DRM_ERROR("Unknown sequence %u\n", seq_id);
goto err;
}
- /* partial parsing to skip elements */
- data = goto_next_sequence(data, &seq_size);
+ dev_priv->vbt.dsi.sequence[seq_id] = data + index;
- if (data == NULL) {
- DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
+ if (sequence->version >= 3)
+ index = goto_next_sequence_v3(data, index, seq_size);
+ else
+ index = goto_next_sequence(data, index, seq_size);
+ if (!index) {
+ DRM_ERROR("Invalid sequence %u\n", seq_id);
goto err;
}
-
- if (*data == 0)
- break; /* end of sequence reached */
}
- DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
+ dev_priv->vbt.dsi.data = data;
+ dev_priv->vbt.dsi.size = seq_size;
+ dev_priv->vbt.dsi.seq_version = sequence->version;
+
+ DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
return;
-err:
- kfree(dev_priv->vbt.dsi.data);
- dev_priv->vbt.dsi.data = NULL;
- /* error during parsing so set all pointers to null
- * because of partial parsing */
+err:
+ kfree(data);
memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
- if (bdb->version < 195) {
+ if (bdb->version < 106) {
+ expected_size = 22;
+ } else if (bdb->version < 109) {
+ expected_size = 27;
+ } else if (bdb->version < 195) {
+ BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
expected_size = sizeof(struct old_child_dev_config);
} else if (bdb->version == 195) {
expected_size = 37;
bdb->version, expected_size);
}
- /* The legacy sized child device config is the minimum we need. */
- if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
- DRM_ERROR("Child device config size %u is too small.\n",
- p_defs->child_dev_size);
- return;
- }
-
/* Flag an error for unexpected size, but continue anyway. */
if (p_defs->child_dev_size != expected_size)
DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
p_defs->child_dev_size, expected_size, bdb->version);
+ /* The legacy sized child device config is the minimum we need. */
+ if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+ DRM_DEBUG_KMS("Child device config size %u is too small.\n",
+ p_defs->child_dev_size);
+ return;
+ }
+
/* get the block size of general definitions */
block_size = get_blocksize(p_defs);
/* get the number of child device */
/**
* intel_bios_init - find VBT and initialize settings from the BIOS
- * @dev: DRM device
+ * @dev_priv: i915 device instance
*
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
* to appropriate values.
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb);
- parse_mipi(dev_priv, bdb);
+ parse_mipi_config(dev_priv, bdb);
+ parse_mipi_sequence(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);
if (bios)
*
*/
-#ifndef _I830_BIOS_H_
-#define _I830_BIOS_H_
-
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
+
+/**
+ * struct vbt_header - VBT Header structure
+ * @signature: VBT signature, always starts with "$VBT"
+ * @version: Version of this structure
+ * @header_size: Size of this structure
+ * @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks)
+ * @vbt_checksum: Checksum
+ * @reserved0: Reserved
+ * @bdb_offset: Offset of &struct bdb_header from beginning of VBT
+ * @aim_offset: Offsets of add-in data blocks from beginning of VBT
+ */
struct vbt_header {
- u8 signature[20]; /**< Always starts with 'VBT$' */
- u16 version; /**< decimal */
- u16 header_size; /**< in bytes */
- u16 vbt_size; /**< in bytes */
+ u8 signature[20];
+ u16 version;
+ u16 header_size;
+ u16 vbt_size;
u8 vbt_checksum;
u8 reserved0;
- u32 bdb_offset; /**< from beginning of VBT */
- u32 aim_offset[4]; /**< from beginning of VBT */
+ u32 bdb_offset;
+ u32 aim_offset[4];
} __packed;
+/**
+ * struct bdb_header - BDB Header structure
+ * @signature: BDB signature "BIOS_DATA_BLOCK"
+ * @version: Version of the data block definitions
+ * @header_size: Size of this structure
+ * @bdb_size: Size of BDB (BDB Header and data blocks)
+ */
struct bdb_header {
- u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
- u16 version; /**< decimal */
- u16 header_size; /**< in bytes */
- u16 bdb_size; /**< in bytes */
+ u8 signature[16];
+ u16 version;
+ u16 header_size;
+ u16 bdb_size;
} __packed;
/* strictly speaking, this is a "skip" block, but it has interesting info */
/* MIPI Sequence Block definitions */
enum mipi_seq {
- MIPI_SEQ_UNDEFINED = 0,
+ MIPI_SEQ_END = 0,
MIPI_SEQ_ASSERT_RESET,
MIPI_SEQ_INIT_OTP,
MIPI_SEQ_DISPLAY_ON,
MIPI_SEQ_DISPLAY_OFF,
MIPI_SEQ_DEASSERT_RESET,
+ MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
+ MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
+ MIPI_SEQ_POWER_ON, /* sequence block v3+ */
+ MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
MIPI_SEQ_MAX
};
enum mipi_seq_element {
- MIPI_SEQ_ELEM_UNDEFINED = 0,
+ MIPI_SEQ_ELEM_END = 0,
MIPI_SEQ_ELEM_SEND_PKT,
MIPI_SEQ_ELEM_DELAY,
MIPI_SEQ_ELEM_GPIO,
- MIPI_SEQ_ELEM_STATUS,
+ MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
+ MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
MIPI_SEQ_ELEM_MAX
};
MIPI_GPIO_MAX
};
-#endif /* _I830_BIOS_H_ */
+#endif /* _INTEL_BIOS_H_ */
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
+
MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
csr->version = css_header->version;
- if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
+ if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
+ csr->version < SKL_CSR_VERSION_REQUIRED) {
DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
" please upgrade to v%u.%u or later"
- " [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
+ " [" FIRMWARE_URL "].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
} else {
- DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
+ dev_notice(dev_priv->dev->dev,
+ "Failed to load DMC firmware"
+ " [" FIRMWARE_URL "],"
+ " disabling runtime power management.\n");
}
release_firmware(fw);
if (!HAS_CSR(dev_priv))
return;
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
{ 0x00002016, 0x000000A0, 0x0 },
{ 0x00005012, 0x0000009B, 0x0 },
{ 0x00007011, 0x00000088, 0x0 },
- { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x80009010, 0x000000C0, 0x1 },
{ 0x00002016, 0x0000009B, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x80007011, 0x000000C0, 0x1 },
{ 0x00002016, 0x000000DF, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x80005012, 0x000000C0, 0x1 },
};
/* Skylake U */
static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
{ 0x0000201B, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
- { 0x00007011, 0x00000087, 0x0 },
- { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x80007011, 0x000000CD, 0x0 },
+ { 0x80009010, 0x000000C0, 0x1 },
{ 0x0000201B, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
- { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x80005012, 0x000000C0, 0x1 },
+ { 0x80007011, 0x000000C0, 0x1 },
{ 0x00002016, 0x00000088, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x80005012, 0x000000C0, 0x1 },
};
/* Skylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x00000018, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
- { 0x00007011, 0x00000087, 0x0 },
- { 0x80009010, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
+ { 0x80007011, 0x000000CD, 0x0 },
+ { 0x80009010, 0x000000C0, 0x3 },
{ 0x00000018, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
- { 0x80007011, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
{ 0x00000018, 0x00000088, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
+ { 0x80005012, 0x000000C0, 0x3 },
};
/*
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00000018, 0x00000098, 0x0 },
{ 0x00004013, 0x00000088, 0x0 },
- { 0x00006012, 0x00000087, 0x0 },
+ { 0x80006012, 0x000000CD, 0x1 },
{ 0x00000018, 0x000000DF, 0x0 },
- { 0x00003015, 0x00000087, 0x0 }, /* Default */
- { 0x00003015, 0x000000C7, 0x0 },
- { 0x00000018, 0x000000C7, 0x0 },
+ { 0x80003015, 0x000000CD, 0x1 }, /* Default */
+ { 0x80003015, 0x000000C0, 0x1 },
+ { 0x80000018, 0x000000C0, 0x1 },
};
/* Skylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00005012, 0x000000DF, 0x0 },
- { 0x00007011, 0x00000084, 0x0 },
+ { 0x80007011, 0x000000CB, 0x3 },
{ 0x00000018, 0x000000A4, 0x0 },
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x00004013, 0x00000080, 0x0 },
- { 0x00006013, 0x000000C7, 0x0 },
+ { 0x80006013, 0x000000C0, 0x3 },
{ 0x00000018, 0x0000008A, 0x0 },
- { 0x00003015, 0x000000C7, 0x0 }, /* Default */
- { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost level 0x7 */
- { 0x00000018, 0x000000C7, 0x0 },
+ { 0x80003015, 0x000000C0, 0x3 }, /* Default */
+ { 0x80003015, 0x000000C0, 0x3 },
+ { 0x80000018, 0x000000C0, 0x3 },
};
struct bxt_ddi_buf_trans {
{ 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
};
-static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
- enum port port, int type);
+static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
+ u32 level, enum port port, int type);
static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
struct intel_digital_port **dig_port,
return port;
}
-static bool
-intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
-{
- return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
-}
-
-static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
- int *n_entries)
+static const struct ddi_buf_trans *
+skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- const struct ddi_buf_trans *ddi_translations;
-
- if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
- ddi_translations = skl_y_ddi_translations_dp;
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
- } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
- ddi_translations = skl_u_ddi_translations_dp;
+ return skl_y_ddi_translations_dp;
+ } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+ return skl_u_ddi_translations_dp;
} else {
- ddi_translations = skl_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ return skl_ddi_translations_dp;
}
-
- return ddi_translations;
}
-static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
- int *n_entries)
+static const struct ddi_buf_trans *
+skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- const struct ddi_buf_trans *ddi_translations;
-
- if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
- if (dev_priv->edp_low_vswing) {
- ddi_translations = skl_y_ddi_translations_edp;
+ if (dev_priv->edp_low_vswing) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
- } else {
- ddi_translations = skl_y_ddi_translations_dp;
- *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
- }
- } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
- if (dev_priv->edp_low_vswing) {
- ddi_translations = skl_u_ddi_translations_edp;
+ return skl_y_ddi_translations_edp;
+ } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+ return skl_u_ddi_translations_edp;
} else {
- ddi_translations = skl_u_ddi_translations_dp;
- *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
- }
- } else {
- if (dev_priv->edp_low_vswing) {
- ddi_translations = skl_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
- } else {
- ddi_translations = skl_ddi_translations_dp;
- *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ return skl_ddi_translations_edp;
}
}
- return ddi_translations;
+ return skl_get_buf_trans_dp(dev_priv, n_entries);
}
static const struct ddi_buf_trans *
-skl_get_buf_trans_hdmi(struct drm_device *dev,
- int *n_entries)
+skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- const struct ddi_buf_trans *ddi_translations;
-
- if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
- ddi_translations = skl_y_ddi_translations_hdmi;
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
+ return skl_y_ddi_translations_hdmi;
} else {
- ddi_translations = skl_ddi_translations_hdmi;
*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+ return skl_ddi_translations_hdmi;
}
-
- return ddi_translations;
}
/*
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
-static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
- bool supports_hdmi)
+void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
size;
- int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+ int hdmi_level;
+ enum port port;
const struct ddi_buf_trans *ddi_translations_fdi;
const struct ddi_buf_trans *ddi_translations_dp;
const struct ddi_buf_trans *ddi_translations_edp;
const struct ddi_buf_trans *ddi_translations_hdmi;
const struct ddi_buf_trans *ddi_translations;
- if (IS_BROXTON(dev)) {
- if (!supports_hdmi)
+ port = intel_ddi_get_encoder_port(encoder);
+ hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+ if (IS_BROXTON(dev_priv)) {
+ if (encoder->type != INTEL_OUTPUT_HDMI)
return;
/* Vswing programming for HDMI */
- bxt_ddi_vswing_sequence(dev, hdmi_level, port,
+ bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port,
INTEL_OUTPUT_HDMI);
return;
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ }
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
- skl_get_buf_trans_dp(dev, &n_dp_entries);
+ skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
- skl_get_buf_trans_edp(dev, &n_edp_entries);
+ skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
ddi_translations_hdmi =
- skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
+ skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
hdmi_default_entry = 8;
/* If we're boosting the current, set bit 31 of trans1 */
if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
dev_priv->vbt.ddi_port_info[port].dp_boost_level)
iboost_bit = 1<<31;
- } else if (IS_BROADWELL(dev)) {
+
+ if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
+ port != PORT_A && port != PORT_E &&
+ n_edp_entries > 9))
+ n_edp_entries = 9;
+ } else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_default_entry = 7;
- } else if (IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
hdmi_default_entry = 7;
}
- switch (port) {
- case PORT_A:
+ switch (encoder->type) {
+ case INTEL_OUTPUT_EDP:
ddi_translations = ddi_translations_edp;
size = n_edp_entries;
break;
- case PORT_B:
- case PORT_C:
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_HDMI:
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
break;
- case PORT_D:
- if (intel_dp_is_edp(dev, PORT_D)) {
- ddi_translations = ddi_translations_edp;
- size = n_edp_entries;
- } else {
- ddi_translations = ddi_translations_dp;
- size = n_dp_entries;
- }
- break;
- case PORT_E:
- if (ddi_translations_fdi)
- ddi_translations = ddi_translations_fdi;
- else
- ddi_translations = ddi_translations_dp;
+ case INTEL_OUTPUT_ANALOG:
+ ddi_translations = ddi_translations_fdi;
size = n_dp_entries;
break;
default:
ddi_translations[i].trans2);
}
- if (!supports_hdmi)
+ if (encoder->type != INTEL_OUTPUT_HDMI)
return;
/* Choose a good default if VBT is badly populated */
ddi_translations_hdmi[hdmi_level].trans2);
}
-/* Program DDI buffers translations for DP. By default, program ports A-D in DP
- * mode and port E for FDI.
- */
-void intel_prepare_ddi(struct drm_device *dev)
-{
- struct intel_encoder *intel_encoder;
- bool visited[I915_MAX_PORTS] = { 0, };
-
- if (!HAS_DDI(dev))
- return;
-
- for_each_intel_encoder(dev, intel_encoder) {
- struct intel_digital_port *intel_dig_port;
- enum port port;
- bool supports_hdmi;
-
- if (intel_encoder->type == INTEL_OUTPUT_DSI)
- continue;
-
- ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
- if (visited[port])
- continue;
-
- supports_hdmi = intel_dig_port &&
- intel_dig_port_supports_hdmi(intel_dig_port);
-
- intel_prepare_ddi_buffers(dev, port, supports_hdmi);
- visited[port] = true;
- }
-}
-
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
u32 temp, i, rx_ctl_val;
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
+ intel_prepare_ddi_buffer(encoder);
+ }
+
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
* mode set "sequence for CRT port" document:
* - TP1 to TP2 time with the default value
TRANS_CLK_SEL_DISABLED);
}
-static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
- enum port port, int type)
+static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+ u32 level, enum port port, int type)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
uint8_t iboost;
uint8_t dp_iboost, hdmi_iboost;
if (dp_iboost) {
iboost = dp_iboost;
} else {
- ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
+ ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
if (dp_iboost) {
iboost = dp_iboost;
} else {
- ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
+ ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries);
+
+ if (WARN_ON(port != PORT_A &&
+ port != PORT_E && n_entries > 9))
+ n_entries = 9;
+
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_HDMI) {
if (hdmi_iboost) {
iboost = hdmi_iboost;
} else {
- ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
+ ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else {
I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
}
-static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
- enum port port, int type)
+static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
+ u32 level, enum port port, int type)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
const struct bxt_ddi_buf_trans *ddi_translations;
u32 n_entries, i;
uint32_t val;
uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dport->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
struct intel_encoder *encoder = &dport->base;
uint8_t train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
level = translate_signal_level(signal_levels);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
- skl_ddi_set_iboost(dev, level, port, encoder->type);
- else if (IS_BROXTON(dev))
- bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
+ else if (IS_BROXTON(dev_priv))
+ bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
return DDI_BUF_TRANS_SELECT(level);
}
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
- int hdmi_level;
+
+ intel_prepare_ddi_buffer(intel_encoder);
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
- if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
+ if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- if (IS_BROXTON(dev)) {
- hdmi_level = dev_priv->vbt.
- ddi_port_info[port].hdmi_level_shift;
- bxt_ddi_vswing_sequence(dev, hdmi_level, port,
- INTEL_OUTPUT_HDMI);
- }
intel_hdmi->set_infoframes(encoder,
crtc->config->has_hdmi_sink,
&crtc->config->base.adjusted_mode);
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp;
+ int max_lanes;
+
+ if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
+ switch (port) {
+ case PORT_A:
+ max_lanes = 4;
+ break;
+ case PORT_E:
+ max_lanes = 0;
+ break;
+ default:
+ max_lanes = 4;
+ break;
+ }
+ } else {
+ switch (port) {
+ case PORT_A:
+ max_lanes = 2;
+ break;
+ case PORT_E:
+ max_lanes = 2;
+ break;
+ default:
+ max_lanes = 4;
+ break;
+ }
+ }
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
+ intel_dig_port->max_lanes = max_lanes;
/*
* Bspec says that DDI_A_4_LANES is the only supported configuration
DRM_FORMAT_ARGB8888,
};
-static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
-
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
}
}
-static const char *state_string(bool enabled)
-{
- return enabled ? "on" : "off";
-}
-
/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
cur_state = !!(val & DPLL_VCO_ENABLE);
I915_STATE_WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
/* XXX: the dsi pll is shared between MIPI DSI ports */
cur_state = val & DSI_PLL_VCO_EN;
I915_STATE_WARN(cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
bool cur_state;
struct intel_dpll_hw_state hw_state;
- if (WARN (!pll,
- "asserting DPLL %s with no DPLL\n", state_string(state)))
+ if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
return;
cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
- pll->name, state_string(state), state_string(cur_state));
+ pll->name, onoff(state), onoff(cur_state));
}
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
}
I915_STATE_WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
cur_state = !!(val & FDI_RX_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
cur_state = !!(val & FDI_RX_PLL_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
I915_STATE_WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
- pipe_name(pipe), state_string(state), state_string(cur_state));
+ pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
I915_STATE_WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
- pipe_name(pipe), state_string(state), state_string(cur_state));
+ pipe_name(pipe), onoff(state), onoff(cur_state));
}
static void assert_plane(struct drm_i915_private *dev_priv,
cur_state = !!(val & DISPLAY_PLANE_ENABLE);
I915_STATE_WARN(cur_state != state,
"plane %c assertion failure (expected %s, current %s)\n",
- plane_name(plane), state_string(state), state_string(cur_state));
+ plane_name(plane), onoff(state), onoff(cur_state));
}
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
I915_WRITE(reg, val | PIPECONF_ENABLE);
POSTING_READ(reg);
+
+ /*
+ * Until the pipe starts DSL will read as 0, which would cause
+ * an apparent vblank timestamp jump, which messes up also the
+ * frame count when it's derived from the timestamps. So let's
+ * wait for the pipe to start properly before we call
+ * drm_crtc_vblank_on()
+ */
+ if (dev->max_vblank_count == 0 &&
+ wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
+ DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}
/**
return false;
}
-unsigned int
-intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
- uint64_t fb_format_modifier, unsigned int plane)
+static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
- unsigned int tile_height;
- uint32_t pixel_bytes;
+ return IS_GEN2(dev_priv) ? 2048 : 4096;
+}
- switch (fb_format_modifier) {
+static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, unsigned int cpp)
+{
+ switch (fb_modifier) {
case DRM_FORMAT_MOD_NONE:
- tile_height = 1;
- break;
+ return cpp;
case I915_FORMAT_MOD_X_TILED:
- tile_height = IS_GEN2(dev) ? 16 : 8;
- break;
+ if (IS_GEN2(dev_priv))
+ return 128;
+ else
+ return 512;
case I915_FORMAT_MOD_Y_TILED:
- tile_height = 32;
- break;
+ if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
+ return 128;
+ else
+ return 512;
case I915_FORMAT_MOD_Yf_TILED:
- pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
- switch (pixel_bytes) {
- default:
+ switch (cpp) {
case 1:
- tile_height = 64;
- break;
+ return 64;
case 2:
case 4:
- tile_height = 32;
- break;
+ return 128;
case 8:
- tile_height = 16;
- break;
case 16:
- WARN_ONCE(1,
- "128-bit pixels are not supported for display!");
- tile_height = 16;
- break;
+ return 256;
+ default:
+ MISSING_CASE(cpp);
+ return cpp;
}
break;
default:
- MISSING_CASE(fb_format_modifier);
- tile_height = 1;
- break;
+ MISSING_CASE(fb_modifier);
+ return cpp;
}
+}
- return tile_height;
+unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, unsigned int cpp)
+{
+ if (fb_modifier == DRM_FORMAT_MOD_NONE)
+ return 1;
+ else
+ return intel_tile_size(dev_priv) /
+ intel_tile_width(dev_priv, fb_modifier, cpp);
}
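
As a sanity check on the new helpers, a standalone sketch (not part of the patch): on a non-gen2 part the tile size is 4 KiB, so a 512-byte X-tile row gives 8 rows and a 128-byte Y-tile row gives 32 rows, matching the heights the old intel_tile_height() hard-coded.

	#include <stdio.h>

	int main(void)
	{
		const unsigned int tile_size = 4096;	/* intel_tile_size() on non-gen2 */
		const unsigned int x_tile_width = 512;	/* X-tile row length, gen3+ */
		const unsigned int y_tile_width = 128;	/* Y-tile row length, 128-byte Y tiling */

		printf("X tile: %u bytes x %u rows\n", x_tile_width, tile_size / x_tile_width);
		printf("Y tile: %u bytes x %u rows\n", y_tile_width, tile_size / y_tile_width);
		return 0;
	}
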
unsigned int
intel_fb_align_height(struct drm_device *dev, unsigned int height,
- uint32_t pixel_format, uint64_t fb_format_modifier)
+ uint32_t pixel_format, uint64_t fb_modifier)
{
- return ALIGN(height, intel_tile_height(dev, pixel_format,
- fb_format_modifier, 0));
+ unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
+ unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
+
+ return ALIGN(height, tile_height);
}
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
struct intel_rotation_info *info = &view->params.rotation_info;
- unsigned int tile_height, tile_pitch;
+ unsigned int tile_size, tile_width, tile_height, cpp;
*view = i915_ggtt_view_normal;
info->uv_offset = fb->offsets[1];
info->fb_modifier = fb->modifier[0];
- tile_height = intel_tile_height(fb->dev, fb->pixel_format,
- fb->modifier[0], 0);
- tile_pitch = PAGE_SIZE / tile_height;
- info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+ tile_size = intel_tile_size(dev_priv);
+
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
+ tile_height = tile_size / tile_width;
+
+ info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
- info->size = info->width_pages * info->height_pages * PAGE_SIZE;
+ info->size = info->width_pages * info->height_pages * tile_size;
if (info->pixel_format == DRM_FORMAT_NV12) {
- tile_height = intel_tile_height(fb->dev, fb->pixel_format,
- fb->modifier[0], 1);
- tile_pitch = PAGE_SIZE / tile_height;
- info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
- info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
- tile_height);
- info->size_uv = info->width_pages_uv * info->height_pages_uv *
- PAGE_SIZE;
+ cpp = drm_format_plane_cpp(fb->pixel_format, 1);
+ tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
+ tile_height = tile_size / tile_width;
+
+ info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
+ info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
+ info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
}
}
-static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
+static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return 256 * 1024;
return 0;
}
+static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier)
+{
+ switch (fb_modifier) {
+ case DRM_FORMAT_MOD_NONE:
+ return intel_linear_alignment(dev_priv);
+ case I915_FORMAT_MOD_X_TILED:
+ if (INTEL_INFO(dev_priv)->gen >= 9)
+ return 256 * 1024;
+ return 0;
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ return 1 * 1024 * 1024;
+ default:
+ MISSING_CASE(fb_modifier);
+ return 0;
+ }
+}
+
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- switch (fb->modifier[0]) {
- case DRM_FORMAT_MOD_NONE:
- alignment = intel_linear_alignment(dev_priv);
- break;
- case I915_FORMAT_MOD_X_TILED:
- if (INTEL_INFO(dev)->gen >= 9)
- alignment = 256 * 1024;
- else {
- /* pin() will align the object as required by fence */
- alignment = 0;
- }
- break;
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
- "Y tiling bo slipped through, driver bug!\n"))
- return -EINVAL;
- alignment = 1 * 1024 * 1024;
- break;
- default:
- MISSING_CASE(fb->modifier[0]);
- return -EINVAL;
- }
+ alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
intel_fill_fb_ggtt_view(&view, fb, plane_state);
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
- int *x, int *y,
- unsigned int tiling_mode,
- unsigned int cpp,
- unsigned int pitch)
-{
- if (tiling_mode != I915_TILING_NONE) {
+unsigned long intel_compute_tile_offset(struct drm_i915_private *dev_priv,
+ int *x, int *y,
+ uint64_t fb_modifier,
+ unsigned int cpp,
+ unsigned int pitch)
+{
+ if (fb_modifier != DRM_FORMAT_MOD_NONE) {
+ unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles;
- tile_rows = *y / 8;
- *y %= 8;
+ tile_size = intel_tile_size(dev_priv);
+ tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
+ tile_height = tile_size / tile_width;
- tiles = *x / (512/cpp);
- *x %= 512/cpp;
+ tile_rows = *y / tile_height;
+ *y %= tile_height;
- return tile_rows * pitch * 8 + tiles * 4096;
+ tiles = *x / (tile_width/cpp);
+ *x %= tile_width/cpp;
+
+ return tile_rows * pitch * tile_height + tiles * tile_size;
} else {
unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
unsigned int offset;
struct drm_plane_state *plane_state = primary->state;
struct drm_crtc_state *crtc_state = intel_crtc->base.state;
struct intel_plane *intel_plane = to_intel_plane(primary);
+ struct intel_plane_state *intel_state =
+ to_intel_plane_state(plane_state);
struct drm_framebuffer *fb;
if (!plane_config->fb)
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
+ intel_state->src.x1 = plane_state->src_x;
+ intel_state->src.y1 = plane_state->src_y;
+ intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
+ intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
+ intel_state->dst.x1 = plane_state->crtc_x;
+ intel_state->dst.y1 = plane_state->crtc_y;
+ intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
+ intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
+
obj = intel_fb_obj(fb);
if (obj->tiling_mode != I915_TILING_NONE)
dev_priv->preserve_bios_swizzle = true;
obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
-static void i9xx_update_primary_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y)
+static void i9xx_update_primary_plane(struct drm_plane *primary,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = primary->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *primary = crtc->primary;
- bool visible = to_intel_plane_state(primary->state)->visible;
- struct drm_i915_gem_object *obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
+ int x = plane_state->src.x1 >> 16;
+ int y = plane_state->src.y1 >> 16;
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
int pixel_size;
- if (!visible || !fb) {
- I915_WRITE(reg, 0);
- if (INTEL_INFO(dev)->gen >= 4)
- I915_WRITE(DSPSURF(plane), 0);
- else
- I915_WRITE(DSPADDR(plane), 0);
- POSTING_READ(reg);
- return;
- }
-
- obj = intel_fb_obj(fb);
- if (WARN_ON(obj == NULL))
- return;
-
pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
dspcntr = DISPPLANE_GAMMA_ENABLE;
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
- ((intel_crtc->config->pipe_src_h - 1) << 16) |
- (intel_crtc->config->pipe_src_w - 1));
+ ((crtc_state->pipe_src_h - 1) << 16) |
+ (crtc_state->pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
I915_WRITE(PRIMSIZE(plane),
- ((intel_crtc->config->pipe_src_h - 1) << 16) |
- (intel_crtc->config->pipe_src_w - 1));
+ ((crtc_state->pipe_src_h - 1) << 16) |
+ (crtc_state->pipe_src_w - 1));
I915_WRITE(PRIMPOS(plane), 0);
I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- intel_gen4_compute_page_offset(dev_priv,
- &x, &y, obj->tiling_mode,
- pixel_size,
- fb->pitches[0]);
+ intel_compute_tile_offset(dev_priv, &x, &y,
+ fb->modifier[0],
+ pixel_size,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
intel_crtc->dspaddr_offset = linear_offset;
}
- if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
- x += (intel_crtc->config->pipe_src_w - 1);
- y += (intel_crtc->config->pipe_src_h - 1);
+ x += (crtc_state->pipe_src_w - 1);
+ y += (crtc_state->pipe_src_h - 1);
/* Finding the last pixel of the last line of the display
data and adding to linear_offset*/
linear_offset +=
- (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
- (intel_crtc->config->pipe_src_w - 1) * pixel_size;
+ (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
+ (crtc_state->pipe_src_w - 1) * pixel_size;
}
intel_crtc->adjusted_x = x;
POSTING_READ(reg);
}
-static void ironlake_update_primary_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y)
+static void i9xx_disable_primary_plane(struct drm_plane *primary,
+ struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *primary = crtc->primary;
- bool visible = to_intel_plane_state(primary->state)->visible;
- struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
- unsigned long linear_offset;
- u32 dspcntr;
- i915_reg_t reg = DSPCNTR(plane);
- int pixel_size;
- if (!visible || !fb) {
- I915_WRITE(reg, 0);
+ I915_WRITE(DSPCNTR(plane), 0);
+ if (INTEL_INFO(dev_priv)->gen >= 4)
I915_WRITE(DSPSURF(plane), 0);
- POSTING_READ(reg);
- return;
- }
-
- obj = intel_fb_obj(fb);
- if (WARN_ON(obj == NULL))
- return;
+ else
+ I915_WRITE(DSPADDR(plane), 0);
+ POSTING_READ(DSPCNTR(plane));
+}
- pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+static void ironlake_update_primary_plane(struct drm_plane *primary,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_device *dev = primary->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ int plane = intel_crtc->plane;
+ unsigned long linear_offset;
+ u32 dspcntr;
+ i915_reg_t reg = DSPCNTR(plane);
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ int x = plane_state->src.x1 >> 16;
+ int y = plane_state->src.y1 >> 16;
dspcntr = DISPPLANE_GAMMA_ENABLE;
-
dspcntr |= DISPLAY_PLANE_ENABLE;
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
linear_offset = y * fb->pitches[0] + x * pixel_size;
intel_crtc->dspaddr_offset =
- intel_gen4_compute_page_offset(dev_priv,
- &x, &y, obj->tiling_mode,
- pixel_size,
- fb->pitches[0]);
+ intel_compute_tile_offset(dev_priv, &x, &y,
+ fb->modifier[0],
+ pixel_size,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
- if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
- x += (intel_crtc->config->pipe_src_w - 1);
- y += (intel_crtc->config->pipe_src_h - 1);
+ x += (crtc_state->pipe_src_w - 1);
+ y += (crtc_state->pipe_src_h - 1);
/* Finding the last pixel of the last line of the display
data and adding to linear_offset*/
linear_offset +=
- (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
- (intel_crtc->config->pipe_src_w - 1) * pixel_size;
+ (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
+ (crtc_state->pipe_src_w - 1) * pixel_size;
}
}
POSTING_READ(reg);
}
-u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
- uint32_t pixel_format)
+u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, uint32_t pixel_format)
{
- u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
-
- /*
- * The stride is either expressed as a multiple of 64 bytes
- * chunks for linear buffers or in number of tiles for tiled
- * buffers.
- */
- switch (fb_modifier) {
- case DRM_FORMAT_MOD_NONE:
- return 64;
- case I915_FORMAT_MOD_X_TILED:
- if (INTEL_INFO(dev)->gen == 2)
- return 128;
- return 512;
- case I915_FORMAT_MOD_Y_TILED:
- /* No need to check for old gens and Y tiling since this is
- * about the display engine and those will be blocked before
- * we get here.
- */
- return 128;
- case I915_FORMAT_MOD_Yf_TILED:
- if (bits_per_pixel == 8)
- return 64;
- else
- return 128;
- default:
- MISSING_CASE(fb_modifier);
+ if (fb_modifier == DRM_FORMAT_MOD_NONE) {
return 64;
+ } else {
+ int cpp = drm_format_plane_cpp(pixel_format, 0);
+
+ return intel_tile_width(dev_priv, fb_modifier, cpp);
}
}
return 0;
}
-static void skylake_update_primary_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y)
+static void skylake_update_primary_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *plane = crtc->primary;
- bool visible = to_intel_plane_state(plane->state)->visible;
- struct drm_i915_gem_object *obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_crtc->pipe;
u32 plane_ctl, stride_div, stride;
u32 tile_height, plane_offset, plane_size;
- unsigned int rotation;
+ unsigned int rotation = plane_state->base.rotation;
int x_offset, y_offset;
u32 surf_addr;
- struct intel_crtc_state *crtc_state = intel_crtc->config;
- struct intel_plane_state *plane_state;
- int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
- int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
- int scaler_id = -1;
-
- plane_state = to_intel_plane_state(plane->state);
-
- if (!visible || !fb) {
- I915_WRITE(PLANE_CTL(pipe, 0), 0);
- I915_WRITE(PLANE_SURF(pipe, 0), 0);
- POSTING_READ(PLANE_CTL(pipe, 0));
- return;
- }
+ int scaler_id = plane_state->scaler_id;
+ int src_x = plane_state->src.x1 >> 16;
+ int src_y = plane_state->src.y1 >> 16;
+ int src_w = drm_rect_width(&plane_state->src) >> 16;
+ int src_h = drm_rect_height(&plane_state->src) >> 16;
+ int dst_x = plane_state->dst.x1;
+ int dst_y = plane_state->dst.y1;
+ int dst_w = drm_rect_width(&plane_state->dst);
+ int dst_h = drm_rect_height(&plane_state->dst);
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
-
- rotation = plane->state->rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);
- obj = intel_fb_obj(fb);
- stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+ stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
fb->pixel_format);
surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
WARN_ON(drm_rect_width(&plane_state->src) == 0);
- scaler_id = plane_state->scaler_id;
- src_x = plane_state->src.x1 >> 16;
- src_y = plane_state->src.y1 >> 16;
- src_w = drm_rect_width(&plane_state->src) >> 16;
- src_h = drm_rect_height(&plane_state->src) >> 16;
- dst_x = plane_state->dst.x1;
- dst_y = plane_state->dst.y1;
- dst_w = drm_rect_width(&plane_state->dst);
- dst_h = drm_rect_height(&plane_state->dst);
-
- WARN_ON(x != src_x || y != src_y);
-
if (intel_rotation_90_or_270(rotation)) {
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
/* stride = Surface height in tiles */
- tile_height = intel_tile_height(dev, fb->pixel_format,
- fb->modifier[0], 0);
+ tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
stride = DIV_ROUND_UP(fb->height, tile_height);
- x_offset = stride * tile_height - y - src_h;
- y_offset = x;
+ x_offset = stride * tile_height - src_y - src_h;
+ y_offset = src_x;
plane_size = (src_w - 1) << 16 | (src_h - 1);
} else {
stride = fb->pitches[0] / stride_div;
- x_offset = x;
- y_offset = y;
+ x_offset = src_x;
+ y_offset = src_y;
plane_size = (src_h - 1) << 16 | (src_w - 1);
}
plane_offset = y_offset << 16 | x_offset;
POSTING_READ(PLANE_SURF(pipe, 0));
}
-/* Assume fb object is pinned & idle & fenced and just update base pointers */
-static int
-intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
+static void skylake_disable_primary_plane(struct drm_plane *primary,
+ struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = to_intel_crtc(crtc)->pipe;
if (dev_priv->fbc.deactivate)
dev_priv->fbc.deactivate(dev_priv);
- dev_priv->display.update_primary_plane(crtc, fb, x, y);
+ I915_WRITE(PLANE_CTL(pipe, 0), 0);
+ I915_WRITE(PLANE_SURF(pipe, 0), 0);
+ POSTING_READ(PLANE_SURF(pipe, 0));
+}
- return 0;
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ /* Support for kgdboc is disabled, this needs a major rework. */
+ DRM_ERROR("legacy panic handler not supported any more.\n");
+
+ return -ENODEV;
}
static void intel_complete_page_flips(struct drm_device *dev)
drm_modeset_lock_crtc(crtc, &plane->base);
plane_state = to_intel_plane_state(plane->base.state);
- if (crtc->state->active && plane_state->base.fb)
- plane->commit_plane(&plane->base, plane_state);
+ if (plane_state->visible)
+ plane->update_plane(&plane->base,
+ to_intel_crtc_state(crtc->state),
+ plane_state);
drm_modeset_unlock_crtc(crtc);
}
intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id, DRM_ROTATE_0,
+ &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long put_domains[I915_MAX_PIPES] = {};
modeset_get_crtc_power_domains(crtc);
}
- if (dev_priv->display.modeset_commit_cdclk) {
- unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
-
- if (cdclk != dev_priv->cdclk_freq &&
- !WARN_ON(!state->allow_modeset))
- dev_priv->display.modeset_commit_cdclk(state);
- }
+ if (dev_priv->display.modeset_commit_cdclk &&
+ intel_state->dev_cdclk != dev_priv->cdclk_freq)
+ dev_priv->display.modeset_commit_cdclk(state);
for (i = 0; i < I915_MAX_PIPES; i++)
if (put_domains[i])
static int intel_mode_max_pixclk(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct intel_crtc *intel_crtc;
- struct intel_crtc_state *crtc_state;
- int max_pixclk = 0;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ unsigned max_pixclk = 0, i;
+ enum pipe pipe;
- for_each_intel_crtc(dev, intel_crtc) {
- crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
+ sizeof(intel_state->min_pixclk));
- if (!crtc_state->base.enable)
- continue;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ int pixclk = 0;
- max_pixclk = max(max_pixclk,
- crtc_state->base.adjusted_mode.crtc_clock);
+ if (crtc_state->enable)
+ pixclk = crtc_state->adjusted_mode.crtc_clock;
+
+ intel_state->min_pixclk[i] = pixclk;
}
+ if (!intel_state->active_crtcs)
+ return 0;
+
+ for_each_pipe(dev_priv, pipe)
+ max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
+
return max_pixclk;
}
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev, state);
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
if (max_pixclk < 0)
return max_pixclk;
- to_intel_atomic_state(state)->cdclk =
+ intel_state->cdclk = intel_state->dev_cdclk =
valleyview_calc_cdclk(dev_priv, max_pixclk);
+ if (!intel_state->active_crtcs)
+ intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
+
return 0;
}
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev, state);
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
if (max_pixclk < 0)
return max_pixclk;
- to_intel_atomic_state(state)->cdclk =
+ intel_state->cdclk = intel_state->dev_cdclk =
broxton_calc_cdclk(dev_priv, max_pixclk);
+ if (!intel_state->active_crtcs)
+ intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
+
return 0;
}
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
- unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
+ unsigned req_cdclk = old_intel_state->dev_cdclk;
/*
* FIXME: We can end up here with all power domains off, yet
for_each_power_domain(domain, domains)
intel_display_power_put(dev_priv, domain);
intel_crtc->enabled_power_domains = 0;
+
+ dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
+ dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}
/*
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
-void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
- const struct dpll *dpll)
+int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+ const struct dpll *dpll)
{
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
- struct intel_crtc_state pipe_config = {
- .base.crtc = &crtc->base,
- .pixel_multiplier = 1,
- .dpll = *dpll,
- };
+ struct intel_crtc_state *pipe_config;
+
+ pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+ if (!pipe_config)
+ return -ENOMEM;
+
+ pipe_config->base.crtc = &crtc->base;
+ pipe_config->pixel_multiplier = 1;
+ pipe_config->dpll = *dpll;
if (IS_CHERRYVIEW(dev)) {
- chv_compute_dpll(crtc, &pipe_config);
- chv_prepare_pll(crtc, &pipe_config);
- chv_enable_pll(crtc, &pipe_config);
+ chv_compute_dpll(crtc, pipe_config);
+ chv_prepare_pll(crtc, pipe_config);
+ chv_enable_pll(crtc, pipe_config);
} else {
- vlv_compute_dpll(crtc, &pipe_config);
- vlv_prepare_pll(crtc, &pipe_config);
- vlv_enable_pll(crtc, &pipe_config);
+ vlv_compute_dpll(crtc, pipe_config);
+ vlv_prepare_pll(crtc, pipe_config);
+ vlv_enable_pll(crtc, pipe_config);
}
+
+ kfree(pipe_config);
+
+ return 0;
}
/**
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, 0));
- stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
+ stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
fb->pixel_format);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
}
-
- intel_prepare_ddi(dev);
}
static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
- unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
+ unsigned int req_cdclk = old_intel_state->dev_cdclk;
broxton_set_cdclk(dev, req_cdclk);
}
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
- struct intel_crtc *intel_crtc;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = state->dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *cstate;
struct intel_crtc_state *crtc_state;
- int max_pixel_rate = 0;
+ unsigned max_pixel_rate = 0, i;
+ enum pipe pipe;
- for_each_intel_crtc(state->dev, intel_crtc) {
- int pixel_rate;
+ memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
+ sizeof(intel_state->min_pixclk));
- crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ for_each_crtc_in_state(state, crtc, cstate, i) {
+ int pixel_rate;
- if (!crtc_state->base.enable)
+ crtc_state = to_intel_crtc_state(cstate);
+ if (!crtc_state->base.enable) {
+ intel_state->min_pixclk[i] = 0;
continue;
+ }
pixel_rate = ilk_pipe_pixel_rate(crtc_state);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
+ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
- max_pixel_rate = max(max_pixel_rate, pixel_rate);
+ intel_state->min_pixclk[i] = pixel_rate;
}
+ if (!intel_state->active_crtcs)
+ return 0;
+
+ for_each_pipe(dev_priv, pipe)
+ max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
+
return max_pixel_rate;
}
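
A worked number for the BDW IPS correction above (illustration only, the mode is an example): with IPS enabled, a 148,500 kHz pipe (1080p60) contributes DIV_ROUND_UP(148500 * 100, 95) = 156,316 kHz to min_pixclk, and the maximum across all pipes then drives the cdclk selection.

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		int pixel_rate = 148500;	/* kHz, e.g. a 1080p60 mode */

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		printf("effective rate with IPS: %d kHz\n", DIV_ROUND_UP(pixel_rate * 100, 95));
		return 0;
	}
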
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int max_pixclk = ilk_max_pixel_rate(state);
int cdclk;
return -EINVAL;
}
- to_intel_atomic_state(state)->cdclk = cdclk;
+ intel_state->cdclk = intel_state->dev_cdclk = cdclk;
+ if (!intel_state->active_crtcs)
+ intel_state->dev_cdclk = 337500;
return 0;
}
static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
- unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
+ unsigned req_cdclk = old_intel_state->dev_cdclk;
broadwell_set_cdclk(dev, req_cdclk);
}
return true;
}
-static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t cntl = 0, size = 0;
- if (on) {
- unsigned int width = intel_crtc->base.cursor->state->crtc_w;
- unsigned int height = intel_crtc->base.cursor->state->crtc_h;
+ if (plane_state && plane_state->visible) {
+ unsigned int width = plane_state->base.crtc_w;
+ unsigned int height = plane_state->base.crtc_h;
unsigned int stride = roundup_pow_of_two(width) * 4;
switch (stride) {
}
}
-static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
- if (on) {
+ if (plane_state && plane_state->visible) {
cntl = MCURSOR_GAMMA_ENABLE;
- switch (intel_crtc->base.cursor->state->crtc_w) {
+ switch (plane_state->base.crtc_w) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
- MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
+ MISSING_CASE(plane_state->base.crtc_w);
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
if (HAS_DDI(dev))
cntl |= CURSOR_PIPE_CSC_ENABLE;
- }
- if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
- cntl |= CURSOR_ROTATE_180;
+ if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
+ cntl |= CURSOR_ROTATE_180;
+ }
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(CURCNTR(pipe), cntl);
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
- bool on)
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- struct drm_plane_state *cursor_state = crtc->cursor->state;
- int x = cursor_state->crtc_x;
- int y = cursor_state->crtc_y;
- u32 base = 0, pos = 0;
-
- base = intel_crtc->cursor_addr;
+ u32 base = intel_crtc->cursor_addr;
+ u32 pos = 0;
- if (x >= intel_crtc->config->pipe_src_w)
- on = false;
+ if (plane_state) {
+ int x = plane_state->base.crtc_x;
+ int y = plane_state->base.crtc_y;
- if (y >= intel_crtc->config->pipe_src_h)
- on = false;
-
- if (x < 0) {
- if (x + cursor_state->crtc_w <= 0)
- on = false;
-
- pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
- x = -x;
- }
- pos |= x << CURSOR_X_SHIFT;
+ if (x < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
- if (y < 0) {
- if (y + cursor_state->crtc_h <= 0)
- on = false;
+ if (y < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
- pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
- y = -y;
+ /* ILK+ do this automagically */
+ if (HAS_GMCH_DISPLAY(dev) &&
+ plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ base += (plane_state->base.crtc_h *
+ plane_state->base.crtc_w - 1) * 4;
+ }
}
- pos |= y << CURSOR_Y_SHIFT;
I915_WRITE(CURPOS(pipe), pos);
- /* ILK+ do this automagically */
- if (HAS_GMCH_DISPLAY(dev) &&
- crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
- base += (cursor_state->crtc_h *
- cursor_state->crtc_w - 1) * 4;
- }
-
if (IS_845G(dev) || IS_I865G(dev))
- i845_update_cursor(crtc, base, on);
+ i845_update_cursor(crtc, base, plane_state);
else
- i9xx_update_cursor(crtc, base, on);
+ i9xx_update_cursor(crtc, base, plane_state);
}
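As a hedged worked example (not part of the patch) of the GMCH 180° rotation
fixup above: a 64x64 ARGB cursor is 64 * 64 pixels at 4 bytes each, so

	base += (64 * 64 - 1) * 4;	/* 16380 bytes: the last pixel */

which, presumably, lets the reversed scanout of the rotated cursor end at the
original base address.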
static bool cursor_size_ok(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *mode;
- struct intel_crtc_state pipe_config;
+ struct intel_crtc_state *pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
if (!mode)
return NULL;
+ pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+ if (!pipe_config) {
+ kfree(mode);
+ return NULL;
+ }
+
/*
* Construct a pipe_config sufficient for getting the clock info
* back out of crtc_clock_get.
* Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
* to use a real value here instead.
*/
- pipe_config.cpu_transcoder = (enum transcoder) pipe;
- pipe_config.pixel_multiplier = 1;
- pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
- pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
- pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
- i9xx_crtc_clock_get(intel_crtc, &pipe_config);
-
- mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
+ pipe_config->cpu_transcoder = (enum transcoder) pipe;
+ pipe_config->pixel_multiplier = 1;
+ pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
+ pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
+ pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
+ i9xx_crtc_clock_get(intel_crtc, pipe_config);
+
+ mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
drm_mode_set_name(mode);
+ kfree(pipe_config);
+
return mode;
}
*/
if (intel_rotation_90_or_270(rotation)) {
/* stride = Surface height in tiles */
- tile_height = intel_tile_height(dev, fb->pixel_format,
- fb->modifier[0], 0);
+ tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
stride = DIV_ROUND_UP(fb->height, tile_height);
} else {
stride = fb->pitches[0] /
- intel_fb_stride_alignment(dev, fb->modifier[0],
- fb->pixel_format);
+ intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+ fb->pixel_format);
}
/*
obj->last_write_req);
} else {
if (!request) {
- ret = i915_gem_request_alloc(ring, ring->default_context, &request);
- if (ret)
+ request = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(request)) {
+ ret = PTR_ERR(request);
goto cleanup_unpin;
+ }
}
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
- if (!is_crtc_enabled && WARN_ON(visible))
- visible = false;
+ /*
+ * Visibility is calculated as if the crtc was on, but
+ * after scaler setup everything depends on it being off
+ * when the crtc isn't active.
+ */
+ if (!is_crtc_enabled)
+ to_intel_plane_state(plane_state)->visible = visible = false;
if (!was_visible && !visible)
return 0;
BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
- if (m > m2) {
- while (m > m2) {
+ if (n > n2) {
+ while (n > n2) {
m2 <<= 1;
n2 <<= 1;
}
- } else if (m < m2) {
- while (m < m2) {
+ } else if (n < n2) {
+ while (n < n2) {
m <<= 1;
n <<= 1;
}
}
- return m == m2 && n == n2;
+ if (n != n2)
+ return false;
+
+ return intel_fuzzy_clock_check(m, m2);
}
static bool
static int intel_modeset_checks(struct drm_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = state->dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0, i;
if (!check_digital_port_conflicts(state)) {
DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
return -EINVAL;
}
+ intel_state->modeset = true;
+ intel_state->active_crtcs = dev_priv->active_crtcs;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->active)
+ intel_state->active_crtcs |= 1 << i;
+ else
+ intel_state->active_crtcs &= ~(1 << i);
+ }
+
/*
* See if the config requires any additional preparation, e.g.
* to adjust global state with pipes off. We need to do this
* adjusted_mode bits in the crtc directly.
*/
if (dev_priv->display.modeset_calc_cdclk) {
- unsigned int cdclk;
-
ret = dev_priv->display.modeset_calc_cdclk(state);
- cdclk = to_intel_atomic_state(state)->cdclk;
- if (!ret && cdclk != dev_priv->cdclk_freq)
+ if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;
} else
- to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
+ to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
intel_modeset_clear_plls(state);
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
return haswell_mode_set_planes_workaround(state);
return 0;
struct drm_atomic_state *state,
bool async)
{
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- int ret = 0;
- int i;
- bool any_ms = false;
+ int ret = 0, i;
+ bool hw_check = intel_state->modeset;
ret = intel_atomic_prepare_commit(dev, state, async);
if (ret) {
drm_atomic_helper_swap_state(dev, state);
dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
+ if (intel_state->modeset) {
+ memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
+ sizeof(intel_state->min_pixclk));
+ dev_priv->active_crtcs = intel_state->active_crtcs;
+ dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+ }
+
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
if (!needs_modeset(crtc->state))
continue;
- any_ms = true;
intel_pre_plane_update(intel_crtc);
if (crtc_state->active) {
* update the output configuration. */
intel_modeset_update_crtc_state(state);
- if (any_ms) {
+ if (intel_state->modeset) {
intel_shared_dpll_commit(state);
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
put_domains = modeset_get_crtc_power_domains(crtc);
/* make sure intel_modeset_check_state runs */
- any_ms = true;
+ hw_check = true;
}
if (!modeset)
drm_atomic_helper_cleanup_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
- if (any_ms)
+ if (hw_check)
intel_modeset_check_state(dev, state);
drm_atomic_state_free(state);
+ /* As one of the primary mmio accessors, KMS has a high likelihood
+ * of triggering bugs in unclaimed access. After we finish
+ * modesetting, see if an error has been flagged, and if so
+ * enable debugging for the next modeset - and hope we catch
+ * the culprit.
+ *
+ * XXX note that we assume display power is on at this point.
+ * This might hold true now but we need to add pm helper to check
+ * unclaimed only when the hardware is on, as atomic commits
+ * can happen also when the device is completely off.
+ */
+ intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
return 0;
}
struct drm_i915_private *dev_priv;
int crtc_clock, cdclk;
- if (!intel_crtc || !crtc_state)
+ if (!intel_crtc || !crtc_state->base.enable)
return DRM_PLANE_HELPER_NO_SCALING;
dev = intel_crtc->base.dev;
&state->visible);
}
-static void
-intel_commit_primary_plane(struct drm_plane *plane,
- struct intel_plane_state *state)
-{
- struct drm_crtc *crtc = state->base.crtc;
- struct drm_framebuffer *fb = state->base.fb;
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- crtc = crtc ? crtc : plane->crtc;
-
- dev_priv->display.update_primary_plane(crtc, fb,
- state->src.x1 >> 16,
- state->src.y1 >> 16);
-}
-
-static void
-intel_disable_primary_plane(struct drm_plane *plane,
- struct drm_crtc *crtc)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
-}
-
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
primary->plane = pipe;
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- primary->commit_plane = intel_commit_primary_plane;
- primary->disable_plane = intel_disable_primary_plane;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
if (INTEL_INFO(dev)->gen >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
+
+ primary->update_plane = skylake_update_primary_plane;
+ primary->disable_plane = skylake_disable_primary_plane;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ intel_primary_formats = i965_primary_formats;
+ num_formats = ARRAY_SIZE(i965_primary_formats);
+
+ primary->update_plane = ironlake_update_primary_plane;
+ primary->disable_plane = i9xx_disable_primary_plane;
} else if (INTEL_INFO(dev)->gen >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
+
+ primary->update_plane = i9xx_update_primary_plane;
+ primary->disable_plane = i9xx_disable_primary_plane;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
+
+ primary->update_plane = i9xx_update_primary_plane;
+ primary->disable_plane = i9xx_disable_primary_plane;
}
drm_universal_plane_init(dev, &primary->base, 0,
intel_disable_cursor_plane(struct drm_plane *plane,
struct drm_crtc *crtc)
{
- intel_crtc_update_cursor(crtc, false);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_crtc->cursor_addr = 0;
+ intel_crtc_update_cursor(crtc, NULL);
}
static void
-intel_commit_cursor_plane(struct drm_plane *plane,
- struct intel_plane_state *state)
+intel_update_cursor_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->base.crtc;
+ struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = plane->dev;
- struct intel_crtc *intel_crtc;
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
- crtc = crtc ? crtc : plane->crtc;
- intel_crtc = to_intel_crtc(crtc);
-
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev)->cursor_needs_physical)
addr = obj->phys_handle->busaddr;
intel_crtc->cursor_addr = addr;
-
- if (crtc->state->active)
- intel_crtc_update_cursor(crtc, state->visible);
+ intel_crtc_update_cursor(crtc, state);
}
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->plane = pipe;
cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
cursor->check_plane = intel_check_cursor_plane;
- cursor->commit_plane = intel_commit_cursor_plane;
+ cursor->update_plane = intel_update_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
drm_universal_plane_init(dev, &cursor->base, 0,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned int aligned_height;
int ret;
u32 pitch_limit, stride_alignment;
return -EINVAL;
}
- stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
+ stride_alignment = intel_fb_stride_alignment(dev_priv,
+ mode_cmd->modifier[0],
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
- intel_fb->obj->framebuffer_references++;
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
return ret;
}
+ intel_fb->obj->framebuffer_references++;
+
return 0;
}
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.update_primary_plane =
- skylake_update_primary_plane;
} else if (HAS_DDI(dev)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.update_primary_plane =
- ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_initial_plane_config =
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
- dev_priv->display.update_primary_plane =
- ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- dev_priv->display.update_primary_plane =
- i9xx_update_primary_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- dev_priv->display.update_primary_plane =
- i9xx_update_primary_plane;
}
/* Returns the core display clock speed */
void intel_modeset_init_hw(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
intel_update_cdclk(dev);
- intel_prepare_ddi(dev);
+
+ dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
+
intel_init_clock_gating(dev);
intel_enable_gt_powersave(dev);
}
+/*
+ * Calculate what we think the watermarks should be for the state we've read
+ * out of the hardware and then immediately program those watermarks so that
+ * we ensure the hardware settings match our internal state.
+ *
+ * We can calculate what we think WM's should be by creating a duplicate of the
+ * current state (which was constructed during hardware readout) and running it
+ * through the atomic check code to calculate new watermark values in the
+ * state object.
+ */
+static void sanitize_watermarks(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *cstate;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+ int i;
+
+ /* Only supported on platforms that use atomic watermark design */
+ if (!dev_priv->display.program_watermarks)
+ return;
+
+ /*
+ * We need to hold connection_mutex before calling duplicate_state so
+ * that the connector loop is protected.
+ */
+ drm_modeset_acquire_init(&ctx, 0);
+retry:
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ } else if (WARN_ON(ret)) {
+ goto fail;
+ }
+
+ state = drm_atomic_helper_duplicate_state(dev, &ctx);
+ if (WARN_ON(IS_ERR(state)))
+ goto fail;
+
+ ret = intel_atomic_check(dev, state);
+ if (ret) {
+ /*
+ * If we fail here, it means that the hardware appears to be
+ * programmed in a way that shouldn't be possible, given our
+ * understanding of watermark requirements. This might mean a
+ * mistake in the hardware readout code or a mistake in the
+ * watermark calculations for a given platform. Raise a WARN
+ * so that this is noticeable.
+ *
+ * If this actually happens, we'll have to just leave the
+ * BIOS-programmed watermarks untouched and hope for the best.
+ */
+ WARN(true, "Could not determine valid watermarks for inherited state\n");
+ goto fail;
+ }
+
+ /* Write calculated watermark values back */
+ to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
+ for_each_crtc_in_state(state, crtc, cstate, i) {
+ struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
+
+ dev_priv->display.program_watermarks(cs);
+ }
+
+ drm_atomic_state_free(state);
+fail:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
*/
intel_find_initial_plane_obj(crtc, &plane_config);
}
+
+ /*
+ * Make sure hardware watermarks really match the state we read out.
+ * Note that we need to do this after reconstructing the BIOS fb's
+ * since the watermark calculation done here will use pstate->fb.
+ */
+ sanitize_watermarks(dev);
}
static void intel_enable_pipe_a(struct drm_device *dev)
struct intel_connector *connector;
int i;
+ dev_priv->active_crtcs = 0;
+
for_each_intel_crtc(dev, crtc) {
- __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
- memset(crtc->config, 0, sizeof(*crtc->config));
- crtc->config->base.crtc = &crtc->base;
+ struct intel_crtc_state *crtc_state = crtc->config;
+ int pixclk = 0;
- crtc->active = dev_priv->display.get_pipe_config(crtc,
- crtc->config);
+ __drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
+ memset(crtc_state, 0, sizeof(*crtc_state));
+ crtc_state->base.crtc = &crtc->base;
- crtc->base.state->active = crtc->active;
- crtc->base.enabled = crtc->active;
+ crtc_state->base.active = crtc_state->base.enable =
+ dev_priv->display.get_pipe_config(crtc, crtc_state);
+
+ crtc->base.enabled = crtc_state->base.enable;
+ crtc->active = crtc_state->base.active;
+
+ if (crtc_state->base.active) {
+ dev_priv->active_crtcs |= 1 << crtc->pipe;
+
+ if (IS_BROADWELL(dev_priv)) {
+ pixclk = ilk_pipe_pixel_rate(crtc_state);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (crtc_state->ips_enabled)
+ pixclk = DIV_ROUND_UP(pixclk * 100, 95);
+ } else if (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_BROXTON(dev_priv))
+ pixclk = crtc_state->base.adjusted_mode.crtc_clock;
+ else
+ WARN_ON(dev_priv->display.modeset_calc_cdclk);
+ }
+
+ dev_priv->min_pixclk[crtc->pipe] = pixclk;
readout_plane_state(crtc);
for_each_pipe(dev_priv, i) {
err_printf(m, "Pipe [%d]:\n", i);
err_printf(m, " Power: %s\n",
- error->pipe[i].power_domain_on ? "on" : "off");
+ onoff(error->pipe[i].power_domain_on));
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
err_printf(m, "CPU transcoder: %c\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " Power: %s\n",
- error->transcoder[i].power_domain_on ? "on" : "off");
+ onoff(error->transcoder[i].power_domain_on));
err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
u8 source_max, sink_max;
- source_max = 4;
- if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
- (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
- source_max = 2;
-
+ source_max = intel_dig_port->max_lanes;
sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
return min(source_max, sink_max);
release_cl_override = IS_CHERRYVIEW(dev) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
- vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
- &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
+ if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+ &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
+ DRM_ERROR("Failed to force on pll for pipe %c!\n",
+ pipe_name(pipe));
+ return;
+ }
}
/*
_intel_edp_backlight_off(intel_dp);
}
-static const char *state_string(bool enabled)
-{
- return enabled ? "on" : "off";
-}
-
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
I915_STATE_WARN(cur_state != state,
"DP port %c state assertion failure (expected %s, current %s)\n",
port_name(dig_port->port),
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
I915_STATE_WARN(cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
enum port port = intel_dig_port->port;
int type, ret;
+ if (WARN(intel_dig_port->max_lanes < 1,
+ "Not enough lanes (%d) for DP on port %c\n",
+ intel_dig_port->max_lanes, port_name(port)))
+ return false;
+
intel_dp->pps_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
intel_dig_port->port = port;
dev_priv->dig_port_map[port] = intel_encoder;
intel_dig_port->dp.output_reg = output_reg;
+ intel_dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
if (IS_CHERRYVIEW(dev)) {
intel_mst->port = found->port;
if (intel_dp->active_mst_links == 0) {
- intel_ddi_clk_select(encoder, intel_crtc->config);
+ intel_prepare_ddi_buffer(&intel_dig_port->base);
+
+ intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
intel_dp_set_link_params(intel_dp, intel_crtc->config);
struct drm_atomic_state base;
unsigned int cdclk;
- bool dpll_set;
+
+ /*
+ * Calculated device cdclk; can differ from cdclk only when
+ * all crtcs are DPMS off.
+ */
+ unsigned int dev_cdclk;
+
+ bool dpll_set, modeset;
+
+ unsigned int active_crtcs;
+ unsigned int min_pixclk[I915_MAX_PIPES];
+
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
struct intel_wm_config wm_config;
};
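A hedged illustration (not part of the patch) of how the two cdclk fields
relate, following broadwell_modeset_calc_cdclk() above:

	/*
	 *   some crtc active:  state->cdclk == state->dev_cdclk
	 *   all crtcs off:     state->cdclk     = computed value
	 *                      state->dev_cdclk = 337500 (assumed BDW minimum)
	 */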
/*
* NOTE: Do not place new plane state fields here (e.g., when adding
* new plane properties). New runtime state should now be placed in
- * the intel_plane_state structure and accessed via drm_plane->state.
+ * the intel_plane_state structure and accessed via plane_state.
*/
void (*update_plane)(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h);
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
int (*check_plane)(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
- void (*commit_plane)(struct drm_plane *plane,
- struct intel_plane_state *state);
};
struct intel_watermark_params {
struct intel_hdmi hdmi;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
+ uint8_t max_lanes;
/* for communication with audio component; protected by av_mutex */
const struct drm_connector *audio_connector;
};
/* intel_ddi.c */
void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
-void intel_prepare_ddi(struct drm_device *dev);
+void intel_prepare_ddi_buffer(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
enum fb_op_origin origin);
-u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
- uint32_t pixel_format);
+u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, uint32_t pixel_format);
/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state);
-unsigned int
-intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
- uint64_t fb_format_modifier, unsigned int plane);
+unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, unsigned int cpp);
static inline bool
intel_rotation_90_or_270(unsigned int rotation)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state);
-void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
- const struct dpll *dpll);
+int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+ const struct dpll *dpll);
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
/* modesetting asserts */
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
- int *x, int *y,
- unsigned int tiling_mode,
- unsigned int bpp,
- unsigned int pitch);
+unsigned long intel_compute_tile_offset(struct drm_i915_private *dev_priv,
+ int *x, int *y,
+ uint64_t fb_modifier,
+ unsigned int cpp,
+ unsigned int pitch);
void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- u32 pclk = 0;
+ u32 pclk;
DRM_DEBUG_KMS("\n");
pipe_config->has_dsi_encoder = true;
*/
pipe_config->dpll_hw_state.dpll_md = 0;
- if (IS_BROXTON(encoder->base.dev))
- pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
- else if (IS_VALLEYVIEW(encoder->base.dev) ||
- IS_CHERRYVIEW(encoder->base.dev))
- pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
-
+ pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
if (!pclk)
return;
extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
-extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
-extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
+extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);
return data;
}
+static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
+{
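+ /* Byte 6 of the I2C element holds the payload length; skip the 7
+ * fixed header bytes plus that payload. */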
+ return data + *(data + 6) + 7;
+}
+
typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
- NULL, /* reserved */
- mipi_exec_send_packet,
- mipi_exec_delay,
- mipi_exec_gpio,
- NULL, /* status read; later */
+ [MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
+ [MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
+ [MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
+ [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
};
/*
*/
static const char * const seq_name[] = {
- "UNDEFINED",
- "MIPI_SEQ_ASSERT_RESET",
- "MIPI_SEQ_INIT_OTP",
- "MIPI_SEQ_DISPLAY_ON",
- "MIPI_SEQ_DISPLAY_OFF",
- "MIPI_SEQ_DEASSERT_RESET"
+ [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+ [MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
+ [MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
+ [MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
+ [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+ [MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
+ [MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
+ [MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
+ [MIPI_SEQ_TEAR_OFF] = "MIPI_SEQ_TEAR_OFF",
+ [MIPI_SEQ_POWER_ON] = "MIPI_SEQ_POWER_ON",
+ [MIPI_SEQ_POWER_OFF] = "MIPI_SEQ_POWER_OFF",
};
-static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data)
+static const char *sequence_name(enum mipi_seq seq_id)
{
+ if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
+ return seq_name[seq_id];
+ else
+ return "(unknown)";
+}
+
+static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
+{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
- int index;
- if (!data)
+ if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
+ return;
+
+ data = dev_priv->vbt.dsi.sequence[seq_id];
+ if (!data) {
+ DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
+ seq_id, sequence_name(seq_id));
return;
+ }
- DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
+ WARN_ON(*data != seq_id);
- /* go to the first element of the sequence */
- data++;
+ DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n",
+ seq_id, sequence_name(seq_id));
- /* parse each byte till we reach end of sequence byte - 0x00 */
- while (1) {
- index = *data;
- mipi_elem_exec = exec_elem[index];
- if (!mipi_elem_exec) {
- DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n");
- return;
- }
+ /* Skip Sequence Byte. */
+ data++;
- /* goto element payload */
- data++;
+ /* Skip Size of Sequence. */
+ if (dev_priv->vbt.dsi.seq_version >= 3)
+ data += 4;
- /* execute the element specific rotines */
- data = mipi_elem_exec(intel_dsi, data);
+ while (1) {
+ u8 operation_byte = *data++;
+ u8 operation_size = 0;
- /*
- * After processing the element, data should point to
- * next element or end of sequence
- * check if have we reached end of sequence
- */
- if (*data == 0x00)
+ if (operation_byte == MIPI_SEQ_ELEM_END)
break;
+
+ if (operation_byte < ARRAY_SIZE(exec_elem))
+ mipi_elem_exec = exec_elem[operation_byte];
+ else
+ mipi_elem_exec = NULL;
+
+ /* Size of Operation. */
+ if (dev_priv->vbt.dsi.seq_version >= 3)
+ operation_size = *data++;
+
+ if (mipi_elem_exec) {
+ data = mipi_elem_exec(intel_dsi, data);
+ } else if (operation_size) {
+ /* We have size, skip. */
+ DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
+ operation_byte);
+ data += operation_size;
+ } else {
+ /* No size, can't skip without parsing. */
+ DRM_ERROR("Unsupported MIPI operation byte %u\n",
+ operation_byte);
+ return;
+ }
}
}
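A hedged sketch (not part of the patch) of the block layout the parser above
walks for sequence version >= 3; the field names are descriptive assumptions,
not taken from VBT documentation:

	/*
	 * byte 0        : sequence id
	 * bytes 1..4    : size of the whole sequence (skipped)
	 * then repeated : operation byte, operation size, payload...
	 * terminated by : MIPI_SEQ_ELEM_END
	 */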
static int vbt_panel_prepare(struct drm_panel *panel)
{
- struct vbt_panel *vbt_panel = to_vbt_panel(panel);
- struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const u8 *sequence;
-
- sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
- generic_exec_sequence(intel_dsi, sequence);
-
- sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
- generic_exec_sequence(intel_dsi, sequence);
+ generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
return 0;
}
static int vbt_panel_unprepare(struct drm_panel *panel)
{
- struct vbt_panel *vbt_panel = to_vbt_panel(panel);
- struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const u8 *sequence;
-
- sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
- generic_exec_sequence(intel_dsi, sequence);
+ generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
return 0;
}
static int vbt_panel_enable(struct drm_panel *panel)
{
- struct vbt_panel *vbt_panel = to_vbt_panel(panel);
- struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const u8 *sequence;
-
- sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
- generic_exec_sequence(intel_dsi, sequence);
+ generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
return 0;
}
static int vbt_panel_disable(struct drm_panel *panel)
{
- struct vbt_panel *vbt_panel = to_vbt_panel(panel);
- struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const u8 *sequence;
-
- sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
- generic_exec_sequence(intel_dsi, sequence);
+ generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
return 0;
}
/* This is cheating a bit with the cleanup. */
vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL);
+ if (!vbt_panel)
+ return NULL;
vbt_panel->intel_dsi = intel_dsi;
drm_panel_init(&vbt_panel->panel);
#include "i915_drv.h"
#include "intel_dsi.h"
-#define DSI_HSS_PACKET_SIZE 4
-#define DSI_HSE_PACKET_SIZE 4
-#define DSI_HSA_PACKET_EXTRA_SIZE 6
-#define DSI_HBP_PACKET_EXTRA_SIZE 6
-#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
-#define DSI_HFP_PACKET_EXTRA_SIZE 6
-#define DSI_EOTP_PACKET_SIZE 4
-
static int dsi_pixel_format_bpp(int pixel_format)
{
int bpp;
71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */
};
-#ifdef DSI_CLK_FROM_RR
-
-static u32 dsi_rr_formula(const struct drm_display_mode *mode,
- int pixel_format, int video_mode_format,
- int lane_count, bool eotp)
-{
- u32 bpp;
- u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
- u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
- u32 bytes_per_line, bytes_per_frame;
- u32 num_frames;
- u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
- u32 dsi_bit_clock_hz;
- u32 dsi_clk;
-
- bpp = dsi_pixel_format_bpp(pixel_format);
-
- hactive = mode->hdisplay;
- vactive = mode->vdisplay;
- hfp = mode->hsync_start - mode->hdisplay;
- hsync = mode->hsync_end - mode->hsync_start;
- hbp = mode->htotal - mode->hsync_end;
-
- vfp = mode->vsync_start - mode->vdisplay;
- vsync = mode->vsync_end - mode->vsync_start;
- vbp = mode->vtotal - mode->vsync_end;
-
- hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
- hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
- hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
- hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
-
- bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
- DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
- hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
- hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
- hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
-
- /*
- * XXX: Need to accurately calculate LP to HS transition timeout and add
- * it to bytes_per_line/bytes_per_frame.
- */
-
- if (eotp && video_mode_format == VIDEO_MODE_BURST)
- bytes_per_line += DSI_EOTP_PACKET_SIZE;
-
- bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
- vactive * bytes_per_line + vfp * bytes_per_line;
-
- if (eotp &&
- (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
- video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
- bytes_per_frame += DSI_EOTP_PACKET_SIZE;
-
- num_frames = drm_mode_vrefresh(mode);
- bytes_per_x_frames = num_frames * bytes_per_frame;
-
- bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
-
- /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
- dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
- dsi_clk = dsi_bit_clock_hz / 1000;
-
- if (eotp && video_mode_format == VIDEO_MODE_BURST)
- dsi_clk *= 2;
-
- return dsi_clk;
-}
-
-#else
-
/* Get DSI clock from pixel clock */
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
return dsi_clk_khz;
}
-#endif
-
static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
struct dsi_mnp *dsi_mnp, int target_dsi_clk)
{
bpp, pipe_bpp);
}
-u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
return pclk;
}
-u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
u32 pclk;
u32 dsi_clk;
return pclk;
}
+u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+{
+ if (IS_BROXTON(encoder->base.dev))
+ return bxt_dsi_get_pclk(encoder, pipe_bpp);
+ else
+ return vlv_dsi_get_pclk(encoder, pipe_bpp);
+}
+
static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
- struct drm_framebuffer *fb = NULL;
+ struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
out:
mutex_unlock(&dev->struct_mutex);
- if (!IS_ERR_OR_NULL(fb))
- drm_framebuffer_unreference(fb);
return ret;
}
uint32_t wq_offset;
uint32_t wq_size;
uint32_t wq_tail;
+ uint32_t wq_head;
/* GuC submission statistics & status */
uint64_t submissions[I915_NUM_RINGS];
uint32_t log_flags;
struct drm_i915_gem_object *log_obj;
+ struct drm_i915_gem_object *ads_obj;
+
struct drm_i915_gem_object *ctx_pool_obj;
struct ida ctx_ids;
struct drm_i915_gem_request *rq);
void i915_guc_submission_disable(struct drm_device *dev);
void i915_guc_submission_fini(struct drm_device *dev);
+int i915_guc_wq_check_space(struct i915_guc_client *client);
#endif
#define GUC_CTX_PRIORITY_HIGH 1
#define GUC_CTX_PRIORITY_KMD_NORMAL 2
#define GUC_CTX_PRIORITY_NORMAL 3
+#define GUC_CTX_PRIORITY_NUM 4
#define GUC_MAX_GPU_CONTEXTS 1024
#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS
#define GUC_CTL_CTXINFO 0
#define GUC_CTL_CTXNUM_IN16_SHIFT 0
#define GUC_CTL_BASE_ADDR_SHIFT 12
+
#define GUC_CTL_ARAT_HIGH 1
#define GUC_CTL_ARAT_LOW 2
+
#define GUC_CTL_DEVICE_INFO 3
#define GUC_CTL_GTTYPE_SHIFT 0
#define GUC_CTL_COREFAMILY_SHIFT 7
+
#define GUC_CTL_LOG_PARAMS 4
#define GUC_LOG_VALID (1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
#define GUC_LOG_ISR_PAGES 3
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_BUF_ADDR_SHIFT 12
+
#define GUC_CTL_PAGE_FAULT_CONTROL 5
+
#define GUC_CTL_WA 6
#define GUC_CTL_WA_UK_BY_DRIVER (1 << 3)
+
#define GUC_CTL_FEATURE 7
#define GUC_CTL_VCS2_ENABLED (1 << 0)
#define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1)
#define GUC_CTL_PREEMPTION_LOG (1 << 5)
#define GUC_CTL_ENABLE_SLPC (1 << 7)
#define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8)
+
#define GUC_CTL_DEBUG 8
#define GUC_LOG_VERBOSITY_SHIFT 0
#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
/* Verbosity range-check limits, without the shift */
#define GUC_LOG_VERBOSITY_MIN 0
#define GUC_LOG_VERBOSITY_MAX 3
+#define GUC_LOG_VERBOSITY_MASK 0x0000000f
+#define GUC_LOG_DESTINATION_MASK (3 << 4)
+#define GUC_LOG_DISABLED (1 << 6)
+#define GUC_PROFILE_ENABLED (1 << 7)
+#define GUC_WQ_TRACK_ENABLED (1 << 8)
+#define GUC_ADS_ENABLED (1 << 9)
+#define GUC_DEBUG_RESERVED (1 << 10)
+#define GUC_ADS_ADDR_SHIFT 11
+#define GUC_ADS_ADDR_MASK 0xfffff800
+
#define GUC_CTL_RSRVD 9
-#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1)
+#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
/**
* DOC: GuC Firmware Layout
#define GUC_POWER_D2 3
#define GUC_POWER_D3 4
+/* Scheduling policy settings */
+
+/* Reset engine upon preempt failure */
+#define POLICY_RESET_ENGINE (1<<0)
+/* Preempt to idle on quantum expiry */
+#define POLICY_PREEMPT_TO_IDLE (1<<1)
+
+#define POLICY_MAX_NUM_WI 15
+
+struct guc_policy {
+ /* Time for one workload to execute. (in micro seconds) */
+ u32 execution_quantum;
+ u32 reserved1;
+
+ /* Time to wait for a preemption request to complete before issuing a
+ * reset. (in micro seconds). */
+ u32 preemption_time;
+
+ /* How much time to allow the workload to run after the first fault
+ * is observed, before preempting. (in micro seconds) */
+ u32 fault_time;
+
+ u32 policy_flags;
+ u32 reserved[2];
+} __packed;
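A hedged sketch (not part of the patch) of filling one policy slot; the
numbers are purely illustrative assumptions, not driver defaults:

	struct guc_policy example = {
		.execution_quantum = 1000000,	/* 1 s, in microseconds */
		.preemption_time   = 500000,
		.fault_time        = 250000,
		.policy_flags      = POLICY_RESET_ENGINE | POLICY_PREEMPT_TO_IDLE,
	};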
+
+struct guc_policies {
+ struct guc_policy policy[GUC_CTX_PRIORITY_NUM][I915_NUM_RINGS];
+
+ /* In micro seconds. How much time to allow before DPC processing is
+ * called back via interrupt (to prevent DPC queue drain starving).
+ * Typically 1000s of micro seconds (example only, not granularity). */
+ u32 dpc_promote_time;
+
+ /* Must be set to take these new values. */
+ u32 is_valid;
+
+ /* Max number of WIs to process per call. A large value may keep CS
+ * idle. */
+ u32 max_num_work_items;
+
+ u32 reserved[19];
+} __packed;
+
+/* GuC MMIO reg state struct */
+
+#define GUC_REGSET_FLAGS_NONE 0x0
+#define GUC_REGSET_POWERCYCLE 0x1
+#define GUC_REGSET_MASKED 0x2
+#define GUC_REGSET_ENGINERESET 0x4
+#define GUC_REGSET_SAVE_DEFAULT_VALUE 0x8
+#define GUC_REGSET_SAVE_CURRENT_VALUE 0x10
+
+#define GUC_REGSET_MAX_REGISTERS 25
+#define GUC_MMIO_WHITE_LIST_START 0x24d0
+#define GUC_MMIO_WHITE_LIST_MAX 12
+#define GUC_S3_SAVE_SPACE_PAGES 10
+
+struct guc_mmio_regset {
+ struct __packed {
+ u32 offset;
+ u32 value;
+ u32 flags;
+ } registers[GUC_REGSET_MAX_REGISTERS];
+
+ u32 values_valid;
+ u32 number_of_registers;
+} __packed;
+
+struct guc_mmio_reg_state {
+ struct guc_mmio_regset global_reg;
+ struct guc_mmio_regset engine_reg[I915_NUM_RINGS];
+
+ /* MMIO registers that are set as non privileged */
+ struct __packed {
+ u32 mmio_start;
+ u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
+ u32 count;
+ } mmio_white_list[I915_NUM_RINGS];
+} __packed;
+
+/* GuC Additional Data Struct */
+
+struct guc_ads {
+ u32 reg_state_addr;
+ u32 reg_state_buffer;
+ u32 golden_context_lrca;
+ u32 scheduler_policies;
+ u32 reserved0[3];
+ u32 eng_state_size[I915_NUM_RINGS];
+ u32 reserved2[4];
+} __packed;
+
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
enum host2guc_action {
HOST2GUC_ACTION_DEFAULT = 0x0,
i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
}
+ if (guc->ads_obj) {
+ u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
+ >> PAGE_SHIFT;
+ params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
+ params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
+ }
+
/* If GuC submission is enabled, set up additional parameters here */
if (i915.enable_guc_submission) {
u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
direct_interrupts_to_host(dev_priv);
i915_guc_submission_disable(dev);
+ i915_guc_submission_fini(dev);
return err;
}
DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
guc_fw->guc_fw_path, err);
+ mutex_lock(&dev->struct_mutex);
obj = guc_fw->guc_fw_obj;
if (obj)
drm_gem_object_unreference(&obj->base);
guc_fw->guc_fw_obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
release_firmware(fw); /* OK even if fw is NULL */
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ mutex_lock(&dev->struct_mutex);
direct_interrupts_to_host(dev_priv);
+ i915_guc_submission_disable(dev);
i915_guc_submission_fini(dev);
- mutex_lock(&dev->struct_mutex);
if (guc_fw->guc_fw_obj)
drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
guc_fw->guc_fw_obj = NULL;
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
+ if (WARN(intel_dig_port->max_lanes < 4,
+ "Not enough lanes (%d) for HDMI on port %c\n",
+ intel_dig_port->max_lanes, port_name(port)))
+ return;
+
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
dev_priv->dig_port_map[port] = intel_encoder;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
+ intel_dig_port->max_lanes = 4;
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
return 0;
}
+static void
+logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
+ (ring->id == VCS || ring->id == VCS2);
+
+ ring->ctx_desc_template = GEN8_CTX_VALID;
+ ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+ GEN8_CTX_ADDRESSING_MODE_SHIFT;
+ if (IS_GEN8(dev))
+ ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
+ ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
+
+ /* TODO: WaDisableLiteRestore when we start using semaphore
+ * signalling between Command Streamers */
+ /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
+
+ /* WaEnableForceRestoreInCtxtDescForVCS:skl */
+ /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
+ if (ring->disable_lite_restore_wa)
+ ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
+}
+
/**
- * intel_execlists_ctx_id() - get the Execlists Context ID
- * @ctx_obj: Logical Ring Context backing object.
+ * intel_lr_context_descriptor_update() - calculate & cache the
+ * descriptor for a pinned context
*
- * Do not confuse with ctx->id! Unfortunately we have a name overload
- * here: the old context ID we pass to userspace as a handler so that
- * they can refer to a context, and the new context ID we pass to the
- * ELSP so that the GPU can inform us of the context status via
- * interrupts.
+ * @ctx: Context to work on
+ * @ring: Engine the descriptor will be used with
*
- * Return: 20-bits globally unique context ID.
+ * The context descriptor encodes various attributes of a context,
+ * including its GTT address and some flags. Because it's fairly
+ * expensive to calculate, we'll just do it once and cache the result,
+ * which remains valid until the context is unpinned.
+ *
+ * This is what a descriptor looks like, from LSB to MSB:
+ * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
+ * bits 12-31: LRCA, GTT address of (the HWSP of) this context
+ * bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
+ * bits 52-63: reserved, may encode the engine ID (for GuC)
*/
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
+static void
+intel_lr_context_descriptor_update(struct intel_context *ctx,
+ struct intel_engine_cs *ring)
{
- u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
- LRC_PPHWSP_PN * PAGE_SIZE;
+ uint64_t lrca, desc;
- /* LRCA is required to be 4K aligned so the more significant 20 bits
- * are globally unique */
- return lrca >> 12;
-}
+ lrca = ctx->engine[ring->id].lrc_vma->node.start +
+ LRC_PPHWSP_PN * PAGE_SIZE;
-static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
-{
- struct drm_device *dev = ring->dev;
+ desc = ring->ctx_desc_template; /* bits 0-11 */
+ desc |= lrca; /* bits 12-31 */
+ desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
- return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
- (ring->id == VCS || ring->id == VCS2);
+ ctx->engine[ring->id].lrc_desc = desc;
}
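A hedged sketch (not part of the patch) of pulling the documented fields back
out of a cached descriptor; the helper names are hypothetical:

	static inline u32 lrc_desc_to_ctx_id(u64 desc)
	{
		return (desc >> GEN8_CTX_ID_SHIFT) & 0xfffff;	/* bits 32-51 */
	}

	static inline u32 lrc_desc_to_lrca(u64 desc)
	{
		return lower_32_bits(desc) & 0xfffff000;	/* bits 12-31 */
	}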
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
- struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
- uint64_t desc;
- uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
- LRC_PPHWSP_PN * PAGE_SIZE;
-
- WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
-
- desc = GEN8_CTX_VALID;
- desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (IS_GEN8(ctx_obj->base.dev))
- desc |= GEN8_CTX_L3LLC_COHERENT;
- desc |= GEN8_CTX_PRIVILEGE;
- desc |= lrca;
- desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
-
- /* TODO: WaDisableLiteRestore when we start using semaphore
- * signalling between Command Streamers */
- /* desc |= GEN8_CTX_FORCE_RESTORE; */
-
- /* WaEnableForceRestoreInCtxtDescForVCS:skl */
- /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
- if (disable_lite_restore_wa(ring))
- desc |= GEN8_CTX_FORCE_RESTORE;
+ return ctx->engine[ring->id].lrc_desc;
+}
- return desc;
+/**
+ * intel_execlists_ctx_id() - get the Execlists Context ID
+ * @ctx: Context to get the ID for
+ * @ring: Engine to get the ID for
+ *
+ * Do not confuse with ctx->id! Unfortunately we have a name overload
+ * here: the old context ID we pass to userspace as a handle so that
+ * they can refer to a context, and the new context ID we pass to the
+ * ELSP so that the GPU can inform us of the context status via
+ * interrupts.
+ *
+ * The context ID is a portion of the context descriptor, so we can
+ * just extract the required part from the cached descriptor.
+ *
+ * Return: 20-bits globally unique context ID.
+ */
+u32 intel_execlists_ctx_id(struct intel_context *ctx,
+ struct intel_engine_cs *ring)
+{
+ return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
}
static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
{
struct intel_engine_cs *ring = rq->ring;
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
- struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
- struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
- struct page *page;
- uint32_t *reg_state;
-
- BUG_ON(!ctx_obj);
- WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
- WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
-
- page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- reg_state = kmap_atomic(page);
+ uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = rq->tail;
- reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
+ reg_state[CTX_RING_BUFFER_START+1] = rq->ringbuf->vma->node.start;
if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* True 32b PPGTT with dynamic page allocation: update PDP
ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}
- kunmap_atomic(reg_state);
-
return 0;
}
/* Same ctx: ignore first request, as second request
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
- list_del(&req0->execlist_link);
- list_add_tail(&req0->execlist_link,
- &ring->execlist_retired_req_list);
+ list_move_tail(&req0->execlist_link,
+ &ring->execlist_retired_req_list);
req0 = cursor;
} else {
req1 = cursor;
execlist_link);
if (head_req != NULL) {
- struct drm_i915_gem_object *ctx_obj =
- head_req->ctx->engine[ring->id].state;
- if (intel_execlists_ctx_id(ctx_obj) == request_id) {
+ if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
WARN(head_req->elsp_submitted == 0,
"Never submitted head request\n");
if (--head_req->elsp_submitted <= 0) {
- list_del(&head_req->execlist_link);
- list_add_tail(&head_req->execlist_link,
- &ring->execlist_retired_req_list);
+ list_move_tail(&head_req->execlist_link,
+ &ring->execlist_retired_req_list);
return true;
}
}
return false;
}
+static void get_context_status(struct intel_engine_cs *ring,
+ u8 read_pointer,
+ u32 *status, u32 *context_id)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
+ return;
+
+ *status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
+ *context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
+}
+
/**
* intel_lrc_irq_handler() - handle Context Switch interrupts
* @ring: Engine Command Streamer to handle.
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
read_pointer = ring->next_context_status_buffer;
- write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
+ write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
spin_lock(&ring->execlist_lock);
while (read_pointer < write_pointer) {
- read_pointer++;
- status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
- status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES));
+
+ get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
+ &status, &status_id);
if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
continue;
WARN(1, "Preemption without Lite Restore\n");
}
- if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
- (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
+ if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
+ (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
if (execlists_check_remove_request(ring, status_id))
submit_contexts++;
}
}
- if (disable_lite_restore_wa(ring)) {
+ if (ring->disable_lite_restore_wa) {
/* Prevent a ctx to preempt itself */
if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
(submit_contexts != 0))
spin_unlock(&ring->execlist_lock);
- WARN(submit_contexts > 2, "More than two context complete events?\n");
+ if (unlikely(submit_contexts > 2))
+ DRM_ERROR("More than two context complete events?\n");
+
ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
+ /* Update the read pointer to the old write pointer. Manual ringbuffer
+ * management ftw </sarcasm> */
I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
- _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
- ((u32)ring->next_context_status_buffer &
- GEN8_CSB_PTR_MASK) << 8));
+ _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+ ring->next_context_status_buffer << 8));
}
static int execlists_context_queue(struct drm_i915_gem_request *request)
struct drm_i915_gem_request *cursor;
int num_elements = 0;
- if (request->ctx != ring->default_context)
+ if (request->ctx != request->i915->kernel_context)
intel_lr_context_pin(request);
i915_gem_request_reference(request);
if (request->ctx == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
- list_del(&tail_req->execlist_link);
- list_add_tail(&tail_req->execlist_link,
- &ring->execlist_retired_req_list);
+ list_move_tail(&tail_req->execlist_link,
+ &ring->execlist_retired_req_list);
}
}
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
- int ret;
+ int ret = 0;
request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
- if (request->ctx != request->ring->default_context) {
- ret = intel_lr_context_pin(request);
+ if (i915.enable_guc_submission) {
+ /*
+ * Check that the GuC has space for the request before
+ * going any further, as the i915_add_request() call
+ * later on mustn't fail ...
+ */
+ struct intel_guc *guc = &request->i915->guc;
+
+ ret = i915_guc_wq_check_space(guc->execbuf_client);
if (ret)
return ret;
}
- return 0;
+ if (request->ctx != request->i915->kernel_context)
+ ret = intel_lr_context_pin(request);
+
+ return ret;
}
static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
-static void
+static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
- struct intel_engine_cs *ring = request->ring;
+ struct intel_ringbuffer *ringbuf = request->ringbuf;
struct drm_i915_private *dev_priv = request->i915;
- intel_logical_ring_advance(request->ringbuf);
+ intel_logical_ring_advance(ringbuf);
+ request->tail = ringbuf->tail;
- request->tail = request->ringbuf->tail;
+ /*
+ * Here we add two extra NOOPs as padding to avoid
+ * lite restore of a context with HEAD==TAIL.
+ *
+ * Caller must reserve WA_TAIL_DWORDS for us!
+ */
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_advance(ringbuf);
- if (intel_ring_stopped(ring))
- return;
+ if (intel_ring_stopped(request->ring))
+ return 0;
if (dev_priv->guc.execbuf_client)
i915_guc_submit(dev_priv->guc.execbuf_client, request);
else
execlists_context_queue(request);
+
+ return 0;
}
static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
struct drm_i915_gem_object *ctx_obj =
ctx->engine[ring->id].state;
- if (ctx_obj && (ctx != ring->default_context))
+ if (ctx_obj && (ctx != req->i915->kernel_context))
intel_lr_context_unpin(req);
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
- struct drm_i915_gem_object *ctx_obj,
- struct intel_ringbuffer *ringbuf)
+ struct intel_context *ctx)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct page *lrc_state_page;
+ int ret;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret)
return ret;
+ lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
+ if (WARN_ON(!lrc_state_page)) {
+ ret = -ENODEV;
+ goto unpin_ctx_obj;
+ }
+
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
+ ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+ intel_lr_context_descriptor_update(ctx, ring);
+ ctx->engine[ring->id].lrc_reg_state = kmap(lrc_state_page);
ctx_obj->dirty = true;
/* Invalidate GuC TLB. */
{
int ret = 0;
struct intel_engine_cs *ring = rq->ring;
- struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
- struct intel_ringbuffer *ringbuf = rq->ringbuf;
if (rq->ctx->engine[ring->id].pin_count++ == 0) {
- ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
+ ret = intel_lr_context_do_pin(ring, rq->ctx);
if (ret)
goto reset_pin_count;
}
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = rq->ringbuf;
- if (ctx_obj) {
- WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (--rq->ctx->engine[ring->id].pin_count == 0) {
- intel_unpin_ringbuffer_obj(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
- }
+ WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+ if (!ctx_obj)
+ return;
+
+ if (--rq->ctx->engine[ring->id].pin_count == 0) {
+ kunmap(kmap_to_page(rq->ctx->engine[ring->id].lrc_reg_state));
+ intel_unpin_ringbuffer_obj(ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ rq->ctx->engine[ring->id].lrc_vma = NULL;
+ rq->ctx->engine[ring->id].lrc_desc = 0;
+ rq->ctx->engine[ring->id].lrc_reg_state = NULL;
}
}
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
- if (WARN_ON_ONCE(w->count == 0))
+ if (w->count == 0)
return 0;
ring->gpu_caches_dirty = true;
u8 next_context_status_buffer_hw;
lrc_setup_hardware_status_page(ring,
- ring->default_context->engine[ring->id].state);
+ dev_priv->kernel_context->engine[ring->id].state);
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
* | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
* BDW | CSB regs not reset | CSB regs reset |
* CHT | CSB regs not reset | CSB regs not reset |
+ * SKL | ? | ? |
+ * BXT | ? | ? |
*/
- next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
- & GEN8_CSB_PTR_MASK);
+ next_context_status_buffer_hw =
+ GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
/*
* When the CSB registers are reset (also after power-up / gpu reset),
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
- bool vf_flush_wa;
+ bool vf_flush_wa = false;
u32 flags = 0;
int ret;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
- }
- /*
- * On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe
- * control.
- */
- vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
- flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ /*
+ * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
+ * pipe control.
+ */
+ if (IS_GEN9(ring->dev))
+ vf_flush_wa = true;
+ }
ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
if (ret)
intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
}
+/*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+#define WA_TAIL_DWORDS 2
+
+static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
+{
+ return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+}
+
static int gen8_emit_request(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct intel_engine_cs *ring = ringbuf->ring;
- u32 cmd;
int ret;
- /*
- * Reserve space for 2 NOOPs at the end of each request to be
- * used as a workaround for not being allowed to do lite
- * restore with HEAD==TAIL (WaIdleLiteRestore).
- */
- ret = intel_logical_ring_begin(request, 8);
+ ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
if (ret)
return ret;
- cmd = MI_STORE_DWORD_IMM_GEN4;
- cmd |= MI_GLOBAL_GTT;
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+ BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
- intel_logical_ring_emit(ringbuf, cmd);
intel_logical_ring_emit(ringbuf,
- (ring->status_page.gfx_addr +
- (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
+ (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+ intel_logical_ring_emit(ringbuf,
+ hws_seqno_address(request->ring) |
+ MI_FLUSH_DW_USE_GTT);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance_and_submit(request);
+ return intel_logical_ring_advance_and_submit(request);
+}
- /*
- * Here we add two extra NOOPs as padding to avoid
- * lite restore of a context with HEAD==TAIL.
- */
- intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance(ringbuf);
+static int gen8_emit_request_render(struct drm_i915_gem_request *request)
+{
+ struct intel_ringbuffer *ringbuf = request->ringbuf;
+ int ret;
- return 0;
+ ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+ if (ret)
+ return ret;
+
+ /* w/a: for post-sync ops following a GPGPU operation we
+ * need a prior CS_STALL, which is emitted by the flush
+ * following the batch.
+ */
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+ intel_logical_ring_emit(ringbuf,
+ (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+ intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+ return intel_logical_ring_advance_and_submit(request);
}
static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
ring->status_page.obj = NULL;
}
+ ring->disable_lite_restore_wa = false;
+ ring->ctx_desc_template = 0;
+
lrc_destroy_wa_ctx_obj(ring);
ring->dev = NULL;
}
-static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+static void
+logical_ring_default_vfuncs(struct drm_device *dev,
+ struct intel_engine_cs *ring)
{
+ /* Default vfuncs which can be overridden by each engine. */
+ ring->init_hw = gen8_init_common_ring;
+ ring->emit_request = gen8_emit_request;
+ ring->emit_flush = gen8_emit_flush;
+ ring->irq_get = gen8_logical_ring_get_irq;
+ ring->irq_put = gen8_logical_ring_put_irq;
+ ring->emit_bb_start = gen8_emit_bb_start;
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ ring->get_seqno = bxt_a_get_seqno;
+ ring->set_seqno = bxt_a_set_seqno;
+ } else {
+ ring->get_seqno = gen8_get_seqno;
+ ring->set_seqno = gen8_set_seqno;
+ }
+}
+
+static inline void
+logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
+{
+ ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+ ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+}
+
+static int
+logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+{
+ struct intel_context *dctx = to_i915(dev)->kernel_context;
int ret;
/* Intentionally left blank. */
INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
+ logical_ring_init_platform_invariants(ring);
+
ret = i915_cmd_parser_init_ring(ring);
if (ret)
goto error;
- ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+ ret = intel_lr_context_deferred_alloc(dctx, ring);
if (ret)
goto error;
/* As this is the default context, always pin it */
- ret = intel_lr_context_do_pin(
- ring,
- ring->default_context->engine[ring->id].state,
- ring->default_context->engine[ring->id].ringbuf);
+ ret = intel_lr_context_do_pin(ring, dctx);
if (ret) {
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
ring->name = "render ring";
ring->id = RCS;
+ ring->exec_id = I915_EXEC_RENDER;
ring->mmio_base = RENDER_RING_BASE;
- ring->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
- ring->irq_keep_mask =
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
+
+ logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ logical_ring_default_vfuncs(dev, ring);
+
+ /* Override some for render ring. */
if (INTEL_INFO(dev)->gen >= 9)
ring->init_hw = gen9_init_render_ring;
else
ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
- ring->get_seqno = bxt_a_get_seqno;
- ring->set_seqno = bxt_a_set_seqno;
- } else {
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
- }
- ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush_render;
- ring->irq_get = gen8_logical_ring_get_irq;
- ring->irq_put = gen8_logical_ring_put_irq;
- ring->emit_bb_start = gen8_emit_bb_start;
+ ring->emit_request = gen8_emit_request_render;
ring->dev = dev;
ring->name = "bsd ring";
ring->id = VCS;
+ ring->exec_id = I915_EXEC_BSD;
ring->mmio_base = GEN6_BSD_RING_BASE;
- ring->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- ring->irq_keep_mask =
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- ring->init_hw = gen8_init_common_ring;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
- ring->get_seqno = bxt_a_get_seqno;
- ring->set_seqno = bxt_a_set_seqno;
- } else {
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
- }
- ring->emit_request = gen8_emit_request;
- ring->emit_flush = gen8_emit_flush;
- ring->irq_get = gen8_logical_ring_get_irq;
- ring->irq_put = gen8_logical_ring_put_irq;
- ring->emit_bb_start = gen8_emit_bb_start;
+ logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, ring);
return logical_ring_init(dev, ring);
}
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
- ring->name = "bds2 ring";
+ ring->name = "bsd2 ring";
ring->id = VCS2;
+ ring->exec_id = I915_EXEC_BSD;
ring->mmio_base = GEN8_BSD2_RING_BASE;
- ring->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- ring->irq_keep_mask =
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- ring->init_hw = gen8_init_common_ring;
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
- ring->emit_request = gen8_emit_request;
- ring->emit_flush = gen8_emit_flush;
- ring->irq_get = gen8_logical_ring_get_irq;
- ring->irq_put = gen8_logical_ring_put_irq;
- ring->emit_bb_start = gen8_emit_bb_start;
+ logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, ring);
return logical_ring_init(dev, ring);
}
ring->name = "blitter ring";
ring->id = BCS;
+ ring->exec_id = I915_EXEC_BLT;
ring->mmio_base = BLT_RING_BASE;
- ring->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- ring->irq_keep_mask =
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- ring->init_hw = gen8_init_common_ring;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
- ring->get_seqno = bxt_a_get_seqno;
- ring->set_seqno = bxt_a_set_seqno;
- } else {
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
- }
- ring->emit_request = gen8_emit_request;
- ring->emit_flush = gen8_emit_flush;
- ring->irq_get = gen8_logical_ring_get_irq;
- ring->irq_put = gen8_logical_ring_put_irq;
- ring->emit_bb_start = gen8_emit_bb_start;
+ logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, ring);
return logical_ring_init(dev, ring);
}
ring->name = "video enhancement ring";
ring->id = VECS;
+ ring->exec_id = I915_EXEC_VEBOX;
ring->mmio_base = VEBOX_RING_BASE;
- ring->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- ring->irq_keep_mask =
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- ring->init_hw = gen8_init_common_ring;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
- ring->get_seqno = bxt_a_get_seqno;
- ring->set_seqno = bxt_a_set_seqno;
- } else {
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
- }
- ring->emit_request = gen8_emit_request;
- ring->emit_flush = gen8_emit_flush;
- ring->irq_get = gen8_logical_ring_get_irq;
- ring->irq_put = gen8_logical_ring_put_irq;
- ring->emit_bb_start = gen8_emit_bb_start;
+ logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, ring);
return logical_ring_init(dev, ring);
}
{
int i;
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = I915_NUM_RINGS; --i >= 0; ) {
+ struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
- if (ctx_obj) {
- struct intel_ringbuffer *ringbuf =
- ctx->engine[i].ringbuf;
- struct intel_engine_cs *ring = ringbuf->ring;
+ if (!ctx_obj)
+ continue;
- if (ctx == ring->default_context) {
- intel_unpin_ringbuffer_obj(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
- }
- WARN_ON(ctx->engine[ring->id].pin_count);
- intel_ringbuffer_free(ringbuf);
- drm_gem_object_unreference(&ctx_obj->base);
+ if (ctx == ctx->i915->kernel_context) {
+ intel_unpin_ringbuffer_obj(ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
}
+
+ WARN_ON(ctx->engine[i].pin_count);
+ intel_ringbuffer_free(ringbuf);
+ drm_gem_object_unreference(&ctx_obj->base);
}
}
-static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
+/**
+ * intel_lr_context_size() - return the size of the context for an engine
+ * @ring: which engine to find the context size for
+ *
+ * Each engine may require a different amount of space for a context image,
+ * so when allocating (or copying) an image, this function can be used to
+ * find the right size for the specific engine.
+ *
+ * Return: size (in bytes) of an engine-specific context image
+ *
+ * Note: this size includes the HWSP, which is part of the context image
+ * in LRC mode, but does not include the "shared data page" used with
+ * GuC submission. The caller should account for this if using the GuC.
+ */
+uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
{
int ret = 0;
*/
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *ctx_obj;
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
WARN_ON(ctx->engine[ring->id].state);
- context_size = round_up(get_lr_context_size(ring), 4096);
+ context_size = round_up(intel_lr_context_size(ring), 4096);
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj;
- if (ctx != ring->default_context && ring->init_context) {
+ if (ctx != ctx->i915->kernel_context && ring->init_context) {
struct drm_i915_gem_request *req;
- ret = i915_gem_request_alloc(ring,
- ctx, &req);
- if (ret) {
- DRM_ERROR("ring create req: %d\n",
- ret);
+ req = i915_gem_request_alloc(ring, ctx);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ DRM_ERROR("ring create req: %d\n", ret);
goto error_ringbuf;
}
#define _INTEL_LRC_H_
#define GEN8_LR_CONTEXT_ALIGN 4096
-#define GEN8_CSB_ENTRIES 6
-#define GEN8_CSB_PTR_MASK 0x07
/* Execlists regs */
#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230)
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)
+/* The docs specify that the write pointer wraps around after 5h, "After status
+ * is written out to the last available status QW at offset 5h, this pointer
+ * wraps to 0."
+ *
+ * Therefore, one must infer that even though there are 3 bits available, 6 and
+ * 7 appear to be reserved.
+ */
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x7
+#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
+#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
+#define GEN8_CSB_WRITE_PTR(csb_status) \
+ (((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
+#define GEN8_CSB_READ_PTR(csb_status) \
+ (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
+
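As an aside, a minimal sketch of how the masks above decompose a raw RING_CONTEXT_STATUS_PTR value and handle the wrap at 6 entries; csb_entries_pending_example() is a hypothetical helper for illustration and not part of the patch:

static inline u32 csb_entries_pending_example(u32 csb_status)
{
	/* Write pointer lives in bits 2:0, read pointer in bits 10:8. */
	u32 write_ptr = GEN8_CSB_WRITE_PTR(csb_status);
	u32 read_ptr = GEN8_CSB_READ_PTR(csb_status);

	/* Both pointers wrap at GEN8_CSB_ENTRIES (6), not at 8. */
	if (read_ptr > write_ptr)
		write_ptr += GEN8_CSB_ENTRIES;

	return write_ptr - read_ptr;
}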
/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
void intel_lr_context_free(struct intel_context *ctx);
+uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct drm_i915_gem_request *req);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring);
+u32 intel_execlists_ctx_id(struct intel_context *ctx,
+ struct intel_engine_cs *ring);
+
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
- ret = i915_gem_request_alloc(ring, ring->default_context, &req);
- if (ret)
- return ret;
+ req = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
ret = intel_ring_begin(req, 4);
if (ret) {
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- ret = i915_gem_request_alloc(ring, ring->default_context, &req);
- if (ret)
- return ret;
+ req = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- ret = i915_gem_request_alloc(ring, ring->default_context, &req);
- if (ret)
- return ret;
+ req = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
ret = intel_ring_begin(req, 6);
if (ret) {
/* synchronous slowpath */
struct drm_i915_gem_request *req;
- ret = i915_gem_request_alloc(ring, ring->default_context, &req);
- if (ret)
- return ret;
+ req = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
#include <linux/module.h>
/**
+ * DOC: RC6
+ *
 * RC6 is a special power stage which allows the GPU to enter a very
* low-voltage mode when idle, using down to 0V while at this stage. This
* stage is entered automatically when the GPU is idle when RC6 support is
if (pipe_h < pfit_h)
pipe_h = pfit_h;
+ if (WARN_ON(!pfit_w || !pfit_h))
+ return pixel_rate;
+
pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
pfit_w * pfit_h);
}
if (WARN(latency == 0, "Latency value missing\n"))
return UINT_MAX;
+ if (WARN_ON(!pipe_htotal))
+ return UINT_MAX;
ret = (latency * pixel_rate) / (pipe_htotal * 10000);
ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
uint8_t bytes_per_pixel)
{
+ /*
+ * Neither of these should be possible since this function shouldn't be
+ * called if the CRTC is off or the plane is invisible. But let's be
+ * extra paranoid to avoid a potential divide-by-zero if we screw up
+ * elsewhere in the driver.
+ */
+ if (WARN_ON(!bytes_per_pixel))
+ return 0;
+ if (WARN_ON(!horiz_pixels))
+ return 0;
+
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
}
static uint32_t
-hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
+hsw_compute_linetime_wm(struct drm_device *dev,
+ struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &cstate->base.adjusted_mode;
u32 linetime, ips_linetime;
- if (!intel_crtc->active)
+ if (!cstate->base.active)
+ return 0;
+ if (WARN_ON(adjusted_mode->crtc_clock == 0))
+ return 0;
+ if (WARN_ON(dev_priv->cdclk_freq == 0))
return 0;
/* The WM are computed with base on how long it takes to fill a single
return PTR_ERR(cstate);
pipe_wm = &cstate->wm.optimal.ilk;
+ memset(pipe_wm, 0, sizeof(*pipe_wm));
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
ps = drm_atomic_get_plane_state(state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- pipe_wm->linetime = hsw_compute_linetime_wm(dev,
- &intel_crtc->base);
+ pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
dev_priv->wm.skl_hw = *results;
}
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_compute_wm_config(struct drm_device *dev,
+ struct intel_wm_config *config)
{
- struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *crtc;
+
+ /* Compute the currently _active_ config */
+ for_each_intel_crtc(dev, crtc) {
+ const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
+
+ if (!wm->pipe_enabled)
+ continue;
+
+ config->sprites_enabled |= wm->sprites_enabled;
+ config->sprites_scaled |= wm->sprites_scaled;
+ config->num_pipes_active++;
+ }
+}
+
+static void ilk_program_watermarks(struct intel_crtc_state *cstate)
+{
+ struct drm_crtc *crtc = cstate->base.crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
- struct intel_wm_config *config = &dev_priv->wm.config;
+ struct intel_wm_config config = {};
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
- ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
+ ilk_compute_wm_config(dev, &config);
+
+ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
if (INTEL_INFO(dev)->gen >= 7 &&
- config->num_pipes_active == 1 && config->sprites_enabled) {
- ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
+ config.num_pipes_active == 1 && config.sprites_enabled) {
+ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
} else {
static void ilk_update_wm(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
- ilk_program_watermarks(dev_priv);
+ ilk_program_watermarks(cstate);
}
static void skl_pipe_wm_active_state(uint32_t val,
dev_priv->display.update_wm(crtc);
}
-/**
+/*
* Lock protecting IPS related data structures
*/
DEFINE_SPINLOCK(mchdev_lock);
}
if (HAS_RC6p(dev))
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
- (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+ onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
+ onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
+ onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
else
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
- (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
+ onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
/* 3a: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
- "on" : "off");
+ DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
/* WaRsUseTimeoutMode */
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
- if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
- ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
+ if (NEEDS_WaRsDisableCoarsePowerGating(dev))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.update_wm = ilk_update_wm;
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
+ dev_priv->display.program_watermarks = ilk_program_watermarks;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
offset = cs_offset;
}
- ret = intel_ring_begin(req, 4);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE));
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
return 0;
}
+static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
+ if (!dev_priv->status_page_dmah)
+ return;
+
+ drm_pci_free(ring->dev, dev_priv->status_page_dmah);
+ ring->status_page.page_addr = NULL;
+}
+
static void cleanup_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
static int init_status_page(struct intel_engine_cs *ring)
{
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = ring->status_page.obj;
- if ((obj = ring->status_page.obj) == NULL) {
+ if (obj == NULL) {
unsigned flags;
int ret;
else
iounmap(ringbuf->virtual_start);
ringbuf->virtual_start = NULL;
+ ringbuf->vma = NULL;
i915_gem_object_ggtt_unpin(ringbuf->obj);
}
}
}
+ ringbuf->vma = i915_gem_obj_to_ggtt(obj);
+
return 0;
}
if (ret)
goto error;
} else {
- BUG_ON(ring->id != RCS);
+ WARN_ON(ring->id != RCS);
ret = init_phys_status_page(ring);
if (ret)
goto error;
if (ring->cleanup)
ring->cleanup(ring);
- cleanup_status_page(ring);
+ if (I915_NEED_GFX_HWS(ring->dev)) {
+ cleanup_status_page(ring);
+ } else {
+ WARN_ON(ring->id != RCS);
+ cleanup_phys_status_page(ring);
+ }
i915_cmd_parser_fini_ring(ring);
i915_gem_batch_pool_fini(&ring->batch_pool);
ring->name = "render ring";
ring->id = RCS;
+ ring->exec_id = I915_EXEC_RENDER;
ring->mmio_base = RENDER_RING_BASE;
if (INTEL_INFO(dev)->gen >= 8) {
ring->name = "bsd ring";
ring->id = VCS;
+ ring->exec_id = I915_EXEC_BSD;
ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6) {
ring->name = "bsd2 ring";
ring->id = VCS2;
+ ring->exec_id = I915_EXEC_BSD;
ring->write_tail = ring_write_tail;
ring->mmio_base = GEN8_BSD2_RING_BASE;
ring->name = "blitter ring";
ring->id = BCS;
+ ring->exec_id = I915_EXEC_BLT;
ring->mmio_base = BLT_RING_BASE;
ring->write_tail = ring_write_tail;
ring->name = "video enhancement ring";
ring->id = VECS;
+ ring->exec_id = I915_EXEC_VEBOX;
ring->mmio_base = VEBOX_RING_BASE;
ring->write_tail = ring_write_tail;
int score;
enum intel_ring_hangcheck_action action;
int deadlock;
+ u32 instdone[I915_NUM_INSTDONE_REG];
};
struct intel_ringbuffer {
struct drm_i915_gem_object *obj;
void __iomem *virtual_start;
+ struct i915_vma *vma;
struct intel_engine_cs *ring;
struct list_head link;
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
- RCS = 0x0,
- VCS,
+ RCS = 0,
BCS,
- VECS,
- VCS2
+ VCS,
+ VCS2, /* Keep instances of the same type engine together. */
+ VECS
} id;
#define I915_NUM_RINGS 5
-#define LAST_USER_RING (VECS + 1)
+#define _VCS(n) (VCS + (n))
+ unsigned int exec_id;
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
struct list_head execlist_queue;
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
+ bool disable_lite_restore_wa;
+ u32 ctx_desc_template;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct drm_i915_gem_request *request);
int (*emit_flush)(struct drm_i915_gem_request *request,
wait_queue_head_t irq_queue;
- struct intel_context *default_context;
struct intel_context *last_context;
struct intel_ring_hangcheck hangcheck;
ring->status_page.page_addr[reg] = value;
}
-/**
+/*
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
* MI_STORE_DATA_IMM.
* The area from dword 0x30 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x30
+#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);
- WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
+ WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
+ "Platform doesn't support DC5.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
{
struct drm_device *dev = dev_priv->dev;
- WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
+ WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
+ "Platform doesn't support DC6.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Backlight is not disabled.\n");
{
assert_can_disable_dc5(dev_priv);
- if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ i915.enable_dc != 0 && i915.enable_dc != 1)
assert_can_disable_dc6(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
static void skl_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
- struct drm_device *dev = dev_priv->dev;
uint32_t tmp, fuse_status;
uint32_t req_mask, state_mask;
bool is_enabled, enable_requested, check_fuse_status = false;
!I915_READ(HSW_PWR_WELL_BIOS),
"Invalid for power well status to be enabled, unless done by the BIOS, \
when request is to disable!\n");
- if (power_well->data == SKL_DISP_PW_2) {
- /*
- * DDI buffer programming unnecessary during
- * driver-load/resume as it's already done
- * during modeset initialization then. It's
- * also invalid here as encoder list is still
- * uninitialized.
- */
- if (!dev_priv->power_domains.initializing)
- intel_prepare_ddi(dev);
- }
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
}
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ i915.enable_dc != 0 && i915.enable_dc != 1)
skl_enable_dc6(dev_priv);
else
gen9_enable_dc5(dev_priv);
if (power_well->count > 0) {
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
} else {
- if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ i915.enable_dc != 0 &&
i915.enable_dc != 1)
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
else
{
struct i915_power_well *well;
- if (!IS_SKYLAKE(dev_priv))
+ if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
{
struct i915_power_well *well;
- if (!IS_SKYLAKE(dev_priv))
+ if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
* Eric Anholt <eric@anholt.net>
*/
-/**
- * @file SDVO command definitions and structures.
+/*
+ * SDVO command definitions and structures.
*/
#define SDVO_OUTPUT_FIRST (0)
#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
#define DTD_FLAG_INTERLACE (1 << 7)
-/** This matches the EDID DTD structure, more or less */
+/* This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
struct {
- u16 clock; /**< pixel clock, in 10kHz units */
- u8 h_active; /**< lower 8 bits (pixels) */
- u8 h_blank; /**< lower 8 bits (pixels) */
- u8 h_high; /**< upper 4 bits each h_active, h_blank */
- u8 v_active; /**< lower 8 bits (lines) */
- u8 v_blank; /**< lower 8 bits (lines) */
- u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ u16 clock; /* pixel clock, in 10kHz units */
+ u8 h_active; /* lower 8 bits (pixels) */
+ u8 h_blank; /* lower 8 bits (pixels) */
+ u8 h_high; /* upper 4 bits each h_active, h_blank */
+ u8 v_active; /* lower 8 bits (lines) */
+ u8 v_blank; /* lower 8 bits (lines) */
+ u8 v_high; /* upper 4 bits each v_active, v_blank */
} part1;
struct {
- u8 h_sync_off; /**< lower 8 bits, from hblank start */
- u8 h_sync_width; /**< lower 8 bits (pixels) */
- /** lower 4 bits each vsync offset, vsync width */
+ u8 h_sync_off; /* lower 8 bits, from hblank start */
+ u8 h_sync_width; /* lower 8 bits (pixels) */
+ /* lower 4 bits each vsync offset, vsync width */
u8 v_sync_off_width;
- /**
+ /*
* 2 high bits of hsync offset, 2 high bits of hsync width,
* bits 4-5 of vsync offset, and 2 high bits of vsync width.
*/
u8 sync_off_width_high;
u8 dtd_flags;
u8 sdvo_flags;
- /** bits 6-7 of vsync offset at bits 6-7 */
+ /* bits 6-7 of vsync offset at bits 6-7 */
u8 v_sync_off_high;
u8 reserved;
} part2;
} __packed;
struct intel_sdvo_pixel_clock_range {
- u16 min; /**< pixel clock, in 10kHz units */
- u16 max; /**< pixel clock, in 10kHz units */
+ u16 min; /* pixel clock, in 10kHz units */
+ u16 max; /* pixel clock, in 10kHz units */
} __packed;
struct intel_sdvo_preferred_input_timing_args {
#define SDVO_CMD_RESET 0x01
-/** Returns a struct intel_sdvo_caps */
+/* Returns a struct intel_sdvo_caps */
#define SDVO_CMD_GET_DEVICE_CAPS 0x02
#define SDVO_CMD_GET_FIRMWARE_REV 0x86
# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
-/**
+/*
* Reports which inputs are trained (managed to sync).
*
* Devices must have trained within 2 vsyncs of a mode change.
unsigned int pad:6;
} __packed;
-/** Returns a struct intel_sdvo_output_flags of active outputs. */
+/* Returns a struct intel_sdvo_output_flags of active outputs. */
#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
-/**
+/*
* Sets the current set of active outputs.
*
* Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
*/
#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
-/**
+/*
* Returns the current mapping of SDVO inputs to outputs on the device.
*
* Returns two struct intel_sdvo_output_flags structures.
u16 in0, in1;
};
-/**
+/*
* Sets the current mapping of SDVO inputs to outputs on the device.
*
 * Takes two struct intel_sdvo_output_flags structures.
*/
#define SDVO_CMD_SET_IN_OUT_MAP 0x07
-/**
+/*
* Returns a struct intel_sdvo_output_flags of attached displays.
*/
#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
-/**
+/*
 * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
*/
#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
-/**
+/*
* Takes a struct intel_sdvo_output_flags.
*/
#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
-/**
+/*
* Returns a struct intel_sdvo_output_flags of displays with hot plug
* interrupts enabled.
*/
unsigned int pad:6;
} __packed;
-/**
+/*
* Selects which input is affected by future input commands.
*
* Commands affected include SET_INPUT_TIMINGS_PART[12],
unsigned int pad:7;
} __packed;
-/**
+/*
* Takes a struct intel_sdvo_output_flags of which outputs are targeted by
* future output commands.
*
# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
-/**
+/*
* Generates a DTD based on the given width, height, and flags.
*
* This will be supported by any device supporting scaling or interlaced
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
-/** Returns a struct intel_sdvo_pixel_clock_range */
+/* Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
-/** Returns a struct intel_sdvo_pixel_clock_range */
+/* Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
-/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+/* Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
-/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+/* Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
-/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+/* Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
-/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+/* 6 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
unsigned int ntsc_m:1;
unsigned int ntsc_j:1;
#define SDVO_CMD_SET_TV_FORMAT 0x29
-/** Returns the resolutiosn that can be used with the given TV format */
+/* Returns the resolutions that can be used with the given TV format */
#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
struct intel_sdvo_sdtv_resolution_request {
unsigned int ntsc_m:1;
#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
-/**
+/*
* The panel power sequencing parameters are in units of milliseconds.
* The high fields are bits 8:9 of the 10-bit values.
*/
}
static void
-skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h)
+skl_update_plane(struct drm_plane *drm_plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride_div, stride;
- const struct drm_intel_sprite_colorkey *key =
- &to_intel_plane_state(drm_plane->state)->ckey;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;
- struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config;
- int scaler_id;
+ int crtc_x = plane_state->dst.x1;
+ int crtc_y = plane_state->dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+ uint32_t x = plane_state->src.x1 >> 16;
+ uint32_t y = plane_state->src.y1 >> 16;
+ uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+ const struct intel_scaler *scaler =
+ &crtc_state->scaler_state.scalers[plane_state->scaler_id];
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
- rotation = drm_plane->state->rotation;
+ rotation = plane_state->base.rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);
- stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+ stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
fb->pixel_format);
- scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id;
-
/* Sizes are 0 based */
src_w--;
src_h--;
surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
if (intel_rotation_90_or_270(rotation)) {
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
/* stride: Surface height in tiles */
- tile_height = intel_tile_height(dev, fb->pixel_format,
- fb->modifier[0], 0);
+ tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
stride = DIV_ROUND_UP(fb->height, tile_height);
plane_size = (src_w << 16) | src_h;
x_offset = stride * tile_height - y - (src_h + 1);
I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
/* program plane scaler */
- if (scaler_id >= 0) {
+ if (plane_state->scaler_id >= 0) {
uint32_t ps_ctrl = 0;
+ int scaler_id = plane_state->scaler_id;
DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
PS_PLANE_SEL(plane));
- ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) |
- crtc_state->scaler_state.scalers[scaler_id].mode;
+ ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode;
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
}
static void
-vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h)
+vlv_update_plane(struct drm_plane *dplane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key =
- &to_intel_plane_state(dplane->state)->ckey;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->dst.x1;
+ int crtc_y = plane_state->dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+ uint32_t x = plane_state->src.x1 >> 16;
+ uint32_t y = plane_state->src.y1 >> 16;
+ uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
sprctl = SP_ENABLE;
crtc_h--;
linear_offset = y * fb->pitches[0] + x * pixel_size;
- sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
- &x, &y,
- obj->tiling_mode,
- pixel_size,
- fb->pitches[0]);
+ sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
+ fb->modifier[0],
+ pixel_size,
+ fb->pitches[0]);
linear_offset -= sprsurf_offset;
- if (dplane->state->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SP_ROTATE_180;
x += src_w;
}
static void
-ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h)
+ivb_update_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key =
- &to_intel_plane_state(plane->state)->ckey;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->dst.x1;
+ int crtc_y = plane_state->dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+ uint32_t x = plane_state->src.x1 >> 16;
+ uint32_t y = plane_state->src.y1 >> 16;
+ uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
sprctl = SPRITE_ENABLE;
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
linear_offset = y * fb->pitches[0] + x * pixel_size;
- sprsurf_offset =
- intel_gen4_compute_page_offset(dev_priv,
- &x, &y, obj->tiling_mode,
- pixel_size, fb->pitches[0]);
+ sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
+ fb->modifier[0],
+ pixel_size,
+ fb->pitches[0]);
linear_offset -= sprsurf_offset;
- if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;
/* HSW and BDW does this automagically in hardware */
}
static void
-ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h)
+ilk_update_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key =
- &to_intel_plane_state(plane->state)->ckey;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->dst.x1;
+ int crtc_y = plane_state->dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+ uint32_t x = plane_state->src.x1 >> 16;
+ uint32_t y = plane_state->src.y1 >> 16;
+ uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
dvscntr = DVS_ENABLE;
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
linear_offset = y * fb->pitches[0] + x * pixel_size;
- dvssurf_offset =
- intel_gen4_compute_page_offset(dev_priv,
- &x, &y, obj->tiling_mode,
- pixel_size, fb->pitches[0]);
+ dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
+ fb->modifier[0],
+ pixel_size,
+ fb->pitches[0]);
linear_offset -= dvssurf_offset;
- if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;
x += src_w;
return 0;
}
-static void
-intel_commit_sprite_plane(struct drm_plane *plane,
- struct intel_plane_state *state)
-{
- struct drm_crtc *crtc = state->base.crtc;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = state->base.fb;
-
- crtc = crtc ? crtc : plane->crtc;
-
- if (state->visible) {
- intel_plane->update_plane(plane, crtc, fb,
- state->dst.x1, state->dst.y1,
- drm_rect_width(&state->dst),
- drm_rect_height(&state->dst),
- state->src.x1 >> 16,
- state->src.y1 >> 16,
- drm_rect_width(&state->src) >> 16,
- drm_rect_height(&state->src) >> 16);
- } else {
- intel_plane->disable_plane(plane, crtc);
- }
-}
-
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
intel_plane->plane = plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
intel_plane->check_plane = intel_check_sprite_plane;
- intel_plane->commit_plane = intel_commit_sprite_plane;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
}
}
+static bool
+fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+{
+ u32 dbg;
+
+ dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
+ if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
+ return false;
+
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+
+ return true;
+}
+
+static bool
+vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+{
+ u32 cer;
+
+ cer = __raw_i915_read32(dev_priv, CLAIM_ER);
+ if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
+ return false;
+
+ __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
+
+ return true;
+}
+
+static bool
+check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+{
+ if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
+ return fpga_check_for_unclaimed_mmio(dev_priv);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return vlv_check_for_unclaimed_mmio(dev_priv);
+
+ return false;
+}
+
static void __intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_FPGA_DBG_UNCLAIMED(dev))
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ /* clear out unclaimed reg detection bit */
+ if (check_for_unclaimed_mmio(dev_priv))
+ DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
/* clear out old GT FIFO errors */
if (IS_GEN6(dev) || IS_GEN7(dev))
}
static void
-hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
- i915_reg_t reg, bool read, bool before)
+__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+ const i915_reg_t reg,
+ const bool read,
+ const bool before)
{
- const char *op = read ? "reading" : "writing to";
- const char *when = before ? "before" : "after";
-
- if (!i915.mmio_debug)
+ /* XXX. We limit the auto-arming of mmio debug traces on
+ * these platforms. There are just too many revealed by
+ * these, and CI/BAT suffers from the noise.
+ * Please fix and then re-enable the automatic traces.
+ */
+ if (i915.mmio_debug < 2 &&
+ (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
return;
- if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
- when, op, i915_mmio_reg_offset(reg));
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ if (WARN(check_for_unclaimed_mmio(dev_priv),
+ "Unclaimed register detected %s %s register 0x%x\n",
+ before ? "before" : "after",
+ read ? "reading" : "writing to",
+ i915_mmio_reg_offset(reg)))
i915.mmio_debug--; /* Only report the first N failures */
- }
}
-static void
-hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
+static inline void
+unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+ const i915_reg_t reg,
+ const bool read,
+ const bool before)
{
- static bool mmio_debug_once = true;
-
- if (i915.mmio_debug || !mmio_debug_once)
+ if (likely(!i915.mmio_debug))
return;
- if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_DEBUG("Unclaimed register detected, "
- "enabling oneshot unclaimed register reporting. "
- "Please use i915.mmio_debug=N for more information.\n");
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- i915.mmio_debug = mmio_debug_once--;
- }
+ __unclaimed_reg_debug(dev_priv, reg, read, before);
}
#define GEN2_READ_HEADER(x) \
unsigned long irqflags; \
u##x val = 0; \
assert_rpm_wakelock_held(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+ unclaimed_reg_debug(dev_priv, reg, true, true)
#define GEN6_READ_FOOTER \
+ unclaimed_reg_debug(dev_priv, reg, true, false); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
GEN6_READ_HEADER(x); \
- hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (NEEDS_FORCE_WAKE(offset)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
val = __raw_i915_read##x(dev_priv, reg); \
- hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
GEN6_READ_FOOTER; \
}
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (!SKL_NEEDS_FORCE_WAKE(offset)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
- hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
GEN6_READ_FOOTER; \
}
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_rpm_wakelock_held(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+ unclaimed_reg_debug(dev_priv, reg, false, true)
#define GEN6_WRITE_FOOTER \
+ unclaimed_reg_debug(dev_priv, reg, false, false); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
#define __gen6_write(x) \
if (NEEDS_FORCE_WAKE(offset)) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
- hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
- hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
GEN6_WRITE_HEADER; \
- hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
__raw_i915_write##x(dev_priv, reg, val); \
- hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
- hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (!SKL_NEEDS_FORCE_WAKE(offset) || \
is_gen9_shadowed(dev_priv, reg)) \
fw_engine = 0; \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
- hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
- hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);
+ dev_priv->uncore.unclaimed_mmio_check = 1;
+
switch (INTEL_INFO(dev)->gen) {
default:
case 9:
return intel_get_gpu_reset(dev) != NULL;
}
-void intel_uncore_check_errors(struct drm_device *dev)
+bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ return check_for_unclaimed_mmio(dev_priv);
+}
+
+bool
+intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
+{
+ if (unlikely(i915.mmio_debug ||
+ dev_priv->uncore.unclaimed_mmio_check <= 0))
+ return false;
- if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
- (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
- DRM_ERROR("Unclaimed register before interrupt\n");
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
+ DRM_DEBUG("Unclaimed register detected, "
+ "enabling oneshot unclaimed register reporting. "
+ "Please use i915.mmio_debug=N for more information.\n");
+ i915.mmio_debug++;
+ dev_priv->uncore.unclaimed_mmio_check--;
+ return true;
}
+
+ return false;
}
/** Handle of the buffer to check for busy */
__u32 handle;
- /** Return busy status (1 if busy, 0 if idle).
- * The high word is used to indicate on which rings the object
- * currently resides:
- * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+ /** Return busy status
+ *
+ * A return of 0 implies that the object is idle (after
+ * having flushed any pending activity), and a non-zero return that
+ * the object is still in-flight on the GPU. (The GPU has not yet
+ * signaled completion for all pending requests that reference the
+ * object.)
+ *
+ * The returned dword is split into two fields to indicate both
+ * the engines on which the object is being read, and the
+ * engine on which it is currently being written (if any).
+ *
+ * The low word (bits 0:15) indicates whether the object is being written
+ * to by any engine (there can only be one, as the GEM implicit
+ * synchronisation rules force writes to be serialised). Only the
+ * engine for the last write is reported.
+ *
+ * The high word (bits 16:31) is a bitmask of which engines are
+ * currently reading from the object. Multiple engines may be
+ * reading from the object simultaneously.
+ *
+ * The value of each engine is the same as specified in the
+ * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
+ * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
+ * the I915_EXEC_RENDER engine for execution, and so it is never
+ * reported as active itself. Some hardware may have parallel
+ * execution engines, e.g. multiple media engines, which are
+ * mapped to the same identifier in the EXECBUFFER2 ioctl and
+ * so are not separately reported for busyness.
*/
__u32 busy;
};
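For illustration only, a hedged userspace-side sketch of how the busy field described above might be decoded; it assumes each reading engine's bit position in the high word matches its I915_EXEC_* identifier, and the helpers are hypothetical rather than part of the patch:

static inline unsigned int busy_write_engine_example(__u32 busy)
{
	/* Low word: I915_EXEC_* id of the (single) writing engine, or 0. */
	return busy & 0xffff;
}

static inline int busy_read_by_example(__u32 busy, unsigned int exec_id)
{
	/* High word: one bit per reading engine, indexed by its exec id. */
	return (busy >> 16) & (1u << exec_id);
}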
DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
+/*
+ * Some modules use swappable objects and may try to swap them out under
+ * memory pressure (via the shrinker). Before doing so, they may wish to
+ * check to see if any swap space is available.
+ */
+EXPORT_SYMBOL_GPL(nr_swap_pages);
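For context, a minimal sketch of the kind of check this export enables in a module's shrinker path; swap_available_example() is a hypothetical helper built on the existing get_nr_swap_pages() accessor from linux/swap.h, not part of the patch:

static inline bool swap_available_example(void)
{
	/* Only try to swap objects out if there is somewhere to put them. */
	return get_nr_swap_pages() > 0;
}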
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority;