select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
+ select SYNC_FILE
+ select IOSF_MBI
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
case I915_PARAM_IRQ_ACTIVE:
case I915_PARAM_ALLOW_BATCHBUFFER:
case I915_PARAM_LAST_DISPATCH:
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
/* Reject all old ums/dri params. */
return -ENODEV;
case I915_PARAM_CHIPSET_ID:
case I915_PARAM_HAS_BSD2:
value = !!dev_priv->engine[VCS2];
break;
- case I915_PARAM_HAS_EXEC_CONSTANTS:
- value = INTEL_GEN(dev_priv) >= 4;
- break;
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev_priv);
break;
value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
- /* The register is already force-woken. We dont need
- * any rpm here
- */
+ intel_runtime_pm_get(dev_priv);
value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
+ intel_runtime_pm_put(dev_priv);
break;
case I915_PARAM_MMAP_GTT_VERSION:
/* Though we've started our numbering from 1, and so class all
case I915_PARAM_HAS_EXEC_HANDLE_LUT:
case I915_PARAM_HAS_COHERENT_PHYS_GTT:
case I915_PARAM_HAS_EXEC_SOFTPIN:
+ case I915_PARAM_HAS_EXEC_ASYNC:
+ case I915_PARAM_HAS_EXEC_FENCE:
/* For the time being all of these are always true;
* if some supported hardware does not have one of these
* features this value needs to be provided from
return -ENOMEM;
}
+static void i915_engines_cleanup(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ kfree(engine);
+}
+
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
destroy_workqueue(dev_priv->hotplug.dp_wq);
*/
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
- if (IS_HSW_EARLY_SDV(dev_priv) ||
- IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
+ bool pre = false;
+
+ pre |= IS_HSW_EARLY_SDV(dev_priv);
+ pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
+ pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
+
+ if (pre) {
DRM_ERROR("This is a pre-production stepping. "
"It may not be fully functional.\n");
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
+ }
}
/**
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
+
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
spin_lock_init(&dev_priv->wm.dsparb_lock);
mutex_init(&dev_priv->pps_mutex);
intel_uc_init_early(dev_priv);
-
i915_memcpy_init_early(dev_priv);
+ ret = intel_engines_init_early(dev_priv);
+ if (ret)
+ return ret;
+
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
- return ret;
+ goto err_engines;
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
+err_engines:
+ i915_engines_cleanup(dev_priv);
return ret;
}
i915_perf_fini(dev_priv);
i915_gem_load_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
+ i915_engines_cleanup(dev_priv);
}
static int i915_mmio_setup(struct drm_i915_private *dev_priv)
goto put_bridge;
intel_uncore_init(dev_priv);
+ i915_gem_init_mmio(dev_priv);
return 0;
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
- DRM_DEBUG_DRIVER("use GPU sempahores? %s\n", yesno(i915.semaphores));
+ DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
}
/**
if (IS_GEN5(dev_priv))
intel_gpu_ips_init(dev_priv);
- i915_audio_component_init(dev_priv);
+ intel_audio_init(dev_priv);
/*
* Some ports require correctly set-up hpd registers for detection to
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
- i915_audio_component_cleanup(dev_priv);
+ intel_audio_deinit(dev_priv);
intel_gpu_ips_teardown();
acpi_video_unregister();
i915_teardown_sysfs(dev_priv);
i915_guc_log_unregister(dev_priv);
- i915_debugfs_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
i915_gem_shrinker_cleanup(dev_priv);
*/
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ const struct intel_device_info *match_info =
+ (struct intel_device_info *)ent->driver_data;
struct drm_i915_private *dev_priv;
int ret;
- if (i915.nuclear_pageflip)
- driver.driver_features |= DRIVER_ATOMIC;
+ /* Enable nuclear pageflip on ILK+, except vlv/chv */
+ if (!i915.nuclear_pageflip &&
+ (match_info->gen < 5 || match_info->has_gmch_display))
+ driver.driver_features &= ~DRIVER_ATOMIC;
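
The inverted logic is easy to misread: DRIVER_ATOMIC is now part of the default driver_features (see the .driver_features hunk further down) and is stripped only when the module parameter is off and the platform cannot do atomic. A sketch of the equivalent positive condition, for reading only, using the same fields as above:

    /* atomic stays enabled iff the user forced it on, or the platform
     * is ILK+ without a GMCH display (i.e. not vlv/chv) */
    bool keep_atomic = i915.nuclear_pageflip ||
                       (match_info->gen >= 5 && !match_info->has_gmch_display);
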
ret = -ENOMEM;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
- kfree(dev_priv);
- return ret;
+ goto out_free;
}
dev_priv->drm.pdev = pdev;
ret = pci_enable_device(pdev);
if (ret)
- goto out_free_priv;
+ goto out_fini;
pci_set_drvdata(pdev, &dev_priv->drm);
i915_driver_cleanup_early(dev_priv);
out_pci_disable:
pci_disable_device(pdev);
-out_free_priv:
+out_fini:
i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
- drm_dev_unref(&dev_priv->drm);
+ drm_dev_fini(&dev_priv->drm);
+out_free:
+ kfree(dev_priv);
return ret;
}
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
intel_fbdev_fini(dev);
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ drm_modeset_acquire_init(&ctx, 0);
+ while (1) {
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (!ret)
+ ret = drm_atomic_helper_disable_all(dev, &ctx);
+
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(&ctx);
+ }
+
+ if (ret)
+ DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
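
This is the standard drm_modeset_acquire_ctx retry dance: drm_modeset_lock_all_ctx() and drm_atomic_helper_disable_all() return -EDEADLK when a lock cycle is detected, and drm_modeset_backoff() drops the held locks and waits for the contended one before the loop retries. A minimal generic form of the pattern, for reference:

    struct drm_modeset_acquire_ctx ctx;
    int ret;

    drm_modeset_acquire_init(&ctx, 0);
retry:
    ret = drm_modeset_lock_all_ctx(dev, &ctx);
    if (ret == -EDEADLK) {
        drm_modeset_backoff(&ctx);
        goto retry;
    }
    /* ... locked work ... */
    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);
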
intel_gvt_cleanup(dev_priv);
i915_driver_unregister(dev_priv);
/* Free error state after interrupts are fully disabled. */
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- i915_destroy_error_state(dev_priv);
+ i915_reset_error_state(dev_priv);
/* Flush any outstanding unpin_work. */
drain_workqueue(dev_priv->wq);
i915_driver_cleanup_mmio(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+}
+
+static void i915_driver_release(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_driver_cleanup_early(dev_priv);
+ drm_dev_fini(&dev_priv->drm);
+
+ kfree(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev_priv, opregion_target_state);
- intel_uncore_forcewake_reset(dev_priv, false);
+ intel_uncore_suspend(dev_priv);
intel_opregion_unregister(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
- intel_uncore_early_sanitize(dev_priv, true);
+ intel_uncore_resume_early(dev_priv);
if (IS_GEN9_LP(dev_priv)) {
if (!dev_priv->suspended_to_idle)
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
+ i915_gem_sanitize(dev_priv);
+
enable_rpm_wakeref_asserts(dev_priv);
out:
goto error;
}
- i915_gem_reset_finish(dev_priv);
+ i915_gem_reset(dev_priv);
intel_overlay_reset(dev_priv);
/* Ok, now get things going again... */
i915_queue_hangcheck(dev_priv);
wakeup:
+ i915_gem_reset_finish(dev_priv);
enable_irq(dev_priv->drm.irq);
wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
return;
return ret;
}
- intel_uncore_forcewake_reset(dev_priv, false);
+ intel_uncore_suspend(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
*/
.driver_features =
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_RENDER | DRIVER_MODESET,
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC,
+ .release = i915_driver_release,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_drm.c"
+#endif
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170123"
-#define DRIVER_TIMESTAMP 1485156432
+#define DRIVER_DATE "20170306"
+#define DRIVER_TIMESTAMP 1488785683
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
PLANE_PRIMARY,
PLANE_SPRITE0,
PLANE_SPRITE1,
+ PLANE_SPRITE2,
PLANE_CURSOR,
I915_MAX_PLANES,
};
POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_E_LANES,
+ POWER_DOMAIN_PORT_DDI_A_IO,
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ POWER_DOMAIN_PORT_DDI_C_IO,
+ POWER_DOMAIN_PORT_DDI_D_IO,
+ POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
#define for_each_hpd_pin(__pin) \
for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
+#define HPD_STORM_DEFAULT_THRESHOLD 5
+
struct i915_hotplug {
struct work_struct hotplug_work;
struct work_struct poll_init_work;
bool poll_enabled;
+ unsigned int hpd_storm_threshold;
+
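
For context, the hotplug IRQ path (not part of this hunk) counts interrupts per pin and declares a storm once the count crosses hpd_storm_threshold within a detection window, after which the pin falls back to polling. A simplified sketch of that check; the stats[] fields and the 1000 ms window are assumptions modelled on the existing storm-detection code, not something this patch introduces:

    /* hypothetical per-pin check, modelled on intel_hpd_irq_storm_detect() */
    if (time_after(jiffies, hpd->stats[pin].last_jiffies + msecs_to_jiffies(1000))) {
        hpd->stats[pin].last_jiffies = jiffies;
        hpd->stats[pin].count = 0;
    } else if (++hpd->stats[pin].count > hpd->hpd_storm_threshold) {
        storm = true;  /* stop trusting this pin's HPD irq */
    }
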
/*
* if we get a HPD irq from DP and a HPD irq from non-DP
* the non-DP HPD could block the workqueue on a mode config
&(dev)->mode_config.encoder_list, \
base.head)
-#define for_each_intel_connector(dev, intel_connector) \
- list_for_each_entry(intel_connector, \
- &(dev)->mode_config.connector_list, \
- base.head)
+#define for_each_intel_connector_iter(intel_connector, iter) \
+ while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
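
Unlike the open-coded list walk it replaces, for_each_intel_connector_iter() goes through the reference-counted drm_connector_list_iter, so callers must bracket it with the iterator setup/teardown helpers (drm_connector_list_iter_get()/drm_connector_list_iter_put() in this kernel). A usage sketch under that assumption:

    struct drm_connector_list_iter conn_iter;
    struct intel_connector *connector;

    drm_connector_list_iter_get(&dev_priv->drm, &conn_iter);
    for_each_intel_connector_iter(connector, &conn_iter) {
        /* safe against concurrent connector hotplug/unplug */
    }
    drm_connector_list_iter_put(&conn_iter);
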
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- for_each_if ((1 << (domain)) & (mask))
+ for_each_if (BIT_ULL(domain) & (mask))
+
+#define for_each_power_well(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells < \
+ (__dev_priv)->power_domains.power_well_count; \
+ (__power_well)++)
+
+#define for_each_power_well_rev(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
+ (__dev_priv)->power_domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ (__power_well)--)
+
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well(__dev_priv, __power_well) \
+ for_each_if ((__power_well)->domains & (__domain_mask))
+
+#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well_rev(__dev_priv, __power_well) \
+ for_each_if ((__power_well)->domains & (__domain_mask))
+
+#define for_each_intel_plane_in_state(__state, plane, plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (plane_state) = to_intel_plane_state((__state)->base.planes[__i].state), 1); \
+ (__i)++) \
+ for_each_if (plane_state)
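
for_each_intel_plane_in_state() is the intel-typed counterpart of the core for_each_plane_in_state(): it walks the planes recorded in an atomic state and skips slots with no plane state. A usage sketch:

    struct intel_plane *plane;
    struct intel_plane_state *plane_state;
    int i;

    for_each_intel_plane_in_state(state, plane, plane_state, i) {
        /* plane_state is the new state for @plane carried by @state */
    }
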
struct drm_i915_private;
struct i915_mm_struct;
struct intel_crtc;
struct intel_limit;
struct dpll;
+struct intel_cdclk_state;
struct drm_i915_display_funcs {
- int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);
+ void (*get_cdclk)(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state);
+ void (*set_cdclk)(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state);
int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
int (*compute_intermediate_wm)(struct drm_device *dev,
int (*compute_global_watermarks)(struct drm_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
- void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode);
void (*audio_codec_disable)(struct intel_encoder *encoder);
- void (*fdi_link_train)(struct drm_crtc *crtc);
+ void (*fdi_link_train)(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
const struct intel_forcewake_range *fw_domains_table;
unsigned int fw_domains_table_entries;
+ struct notifier_block pmic_bus_access_nb;
struct intel_uncore_funcs funcs;
unsigned fifo_count;
INTEL_BROXTON,
INTEL_KABYLAKE,
INTEL_GEMINILAKE,
+ INTEL_MAX_PLATFORMS
};
struct intel_device_info {
struct intel_display_error_state;
-struct drm_i915_error_state {
+struct i915_gpu_state {
struct kref ref;
struct timeval time;
struct timeval boottime;
char error_msg[128];
bool simulated;
+ bool awake;
+ bool wakelock;
+ bool suspended;
int iommu;
u32 reset_count;
u32 suspend_count;
struct intel_device_info device_info;
+ struct i915_params params;
/* Generic register state */
u32 eir;
u32 pgtbl_er;
u32 ier;
- u32 gtier[4];
+ u32 gtier[4], ngtier;
u32 ccid;
u32 derrmr;
u32 forcewake;
u32 gab_ctl;
u32 gfx_mode;
+ u32 nfence;
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
struct intel_instdone instdone;
+ struct drm_i915_error_context {
+ char comm[TASK_COMM_LEN];
+ pid_t pid;
+ u32 handle;
+ u32 hw_id;
+ int ban_score;
+ int active;
+ int guilty;
+ } context;
+
struct drm_i915_error_object {
u64 gtt_offset;
u64 gtt_size;
u32 pp_dir_base;
};
} vm_info;
-
- pid_t pid;
- char comm[TASK_COMM_LEN];
- int context_bans;
} engine[I915_NUM_ENGINES];
struct drm_i915_error_buffer {
u32 pm_iir;
/* PM interrupt bits that should never be masked */
- u32 pm_intr_keep;
+ u32 pm_intrmsk_mbz;
/* Frequencies are stored in potentially platform dependent multiples.
* In other words, *_freq needs to be multiplied by X to be interesting.
unsigned boosts;
/* manual wa residency calculations */
- struct intel_rps_ei up_ei, down_ei;
+ struct intel_rps_ei ei;
/*
* Protects RPS/RC6 register access and PCU communication.
int count;
/* cached hw enabled state */
bool hw_enabled;
- unsigned long domains;
+ u64 domains;
/* unique identifier for this power well */
unsigned long id;
/*
struct work_struct free_work;
/** Usable portion of the GTT for GEM */
- phys_addr_t stolen_base; /* limited to low memory (32-bit) */
+ dma_addr_t stolen_base; /* limited to low memory (32-bit) */
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
loff_t pos;
};
-struct i915_error_state_file_priv {
- struct drm_i915_private *i915;
- struct drm_i915_error_state *error;
-};
-
#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
- struct drm_i915_error_state *first_error;
+ struct i915_gpu_state *first_error;
unsigned long missed_irq_rings;
bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv);
};
+struct intel_cdclk_state {
+ unsigned int cdclk, vco, ref;
+};
+
struct drm_i915_private {
struct drm_device drm;
const struct intel_device_info info;
- int relative_constants_mode;
-
void __iomem *regs;
struct intel_uncore uncore;
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_preferred_vco_freq;
- unsigned int cdclk_freq, max_cdclk_freq;
-
- /*
- * For reading holding any crtc lock is sufficient,
- * for writing must hold all of them.
- */
- unsigned int atomic_cdclk_freq;
+ unsigned int max_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int rawclk_freq;
unsigned int czclk_freq;
struct {
- unsigned int vco, ref;
- } cdclk_pll;
+ /*
+ * The current logical cdclk state.
+ * See intel_atomic_state.cdclk.logical
+ *
+ * For reading holding any crtc lock is sufficient,
+ * for writing must hold all of them.
+ */
+ struct intel_cdclk_state logical;
+ /*
+ * The current actual cdclk state.
+ * See intel_atomic_state.cdclk.actual
+ */
+ struct intel_cdclk_state actual;
+ /* The current hardware cdclk state */
+ struct intel_cdclk_state hw;
+ } cdclk;
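
With the logical/actual/hw split, cdclk reprogramming becomes a comparison of three-field states rather than of a bare frequency. A hypothetical equality helper to illustrate the shape of the state (the name is illustrative, not part of this patch):

    static bool intel_cdclk_state_equal(const struct intel_cdclk_state *a,
                                        const struct intel_cdclk_state *b)
    {
        return a->cdclk == b->cdclk && a->vco == b->vco && a->ref == b->ref;
    }
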
/**
* wq - Driver workqueue for GEM.
/* Used to save the pipe-to-encoder mapping for audio */
struct intel_encoder *av_enc_map[I915_MAX_PIPES];
+ /* necessary resource sharing with HDMI LPE audio driver. */
+ struct {
+ struct platform_device *platdev;
+ int irq;
+ } lpe_audio;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
#define IS_KBL_REVID(dev_priv, since, until) \
(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
+#define GLK_REVID_A0 0x0
+#define GLK_REVID_A1 0x1
+
+#define IS_GLK_REVID(dev_priv, since, until) \
+ (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
+
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
-#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && INTEL_INFO(dev_priv)->is_lp)
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
+#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv))
+#define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv))
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
- IS_SKL_GT3(dev_priv) || \
- IS_SKL_GT4(dev_priv))
+ (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
+int intel_engines_init_early(struct drm_i915_private *dev_priv);
+int intel_engines_init(struct drm_i915_private *dev_priv);
+
/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
- extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
- bool restore_forcewake);
extern void intel_uncore_init(struct drm_i915_private *dev_priv);
extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
- extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
- bool restore);
+ extern void intel_uncore_suspend(struct drm_i915_private *dev_priv);
+ extern void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void i915_gem_sanitize(struct drm_i915_private *i915);
int i915_gem_load_init(struct drm_i915_private *dev_priv);
void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
}
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
+void i915_gem_reset(struct drm_i915_private *dev_priv);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+
+void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
-int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags);
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
+ unsigned int flags);
int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
-void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_i915_private *dev_priv);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
-static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
- const struct i915_error_state_file_priv *error);
+ const struct i915_gpu_state *gpu);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
struct drm_i915_private *i915,
size_t count, loff_t pos);
{
kfree(eb->buf);
}
+
+struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
u32 engine_mask,
const char *error_msg);
-void i915_error_state_get(struct drm_device *dev,
- struct i915_error_state_file_priv *error_priv);
-void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
-void i915_destroy_error_state(struct drm_i915_private *dev_priv);
+
+static inline struct i915_gpu_state *
+i915_gpu_state_get(struct i915_gpu_state *gpu)
+{
+ kref_get(&gpu->ref);
+ return gpu;
+}
+
+void __i915_gpu_state_free(struct kref *kref);
+static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
+{
+ if (gpu)
+ kref_put(&gpu->ref, __i915_gpu_state_free);
+}
+
+struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
+void i915_reset_error_state(struct drm_i915_private *i915);
#else
{
}
-static inline void i915_destroy_error_state(struct drm_i915_private *dev_priv)
+static inline struct i915_gpu_state *
+i915_first_error_state(struct drm_i915_private *i915)
+{
+ return NULL;
+}
+
+static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
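
The error state now follows the usual kref pattern: i915_first_error_state() is expected to hand back a new reference (or NULL), and the reader drops it with i915_gpu_state_put(). A hypothetical reader under that assumption:

    struct i915_gpu_state *gpu;

    gpu = i915_first_error_state(i915);
    if (gpu) {
        /* ... serialize it via i915_error_state_to_str() ... */
        i915_gpu_state_put(gpu);
    }
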
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
+/* intel_lpe_audio.c */
+int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+ void *eld, int port, int pipe, int tmds_clk_speed,
+ bool dp_output, int link_rate);
+
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
+extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
- struct drm_i915_private *dev_priv,
struct intel_display_error_state *error);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
-void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
+int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
}
static inline bool
-__i915_request_irq_complete(struct drm_i915_gem_request *req)
+__i915_request_irq_complete(const struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
+ u32 seqno;
+
+ /* Note that the engine may have wrapped around the seqno, and
+ * so our request->global_seqno will be ahead of the hardware,
+ * even though it completed the request before wrapping. We catch
+ * this by kicking all the waiters before resetting the seqno
+ * in hardware, and also signal the fence.
+ */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
+ return true;
+
+ /* The request was dequeued before we were awoken. We check after
+ * inspecting the hw to confirm that this was the same request
+ * that generated the HWS update. The memory barriers within
+ * the request execution are sufficient to ensure that a check
+ * after reading the value from hw matches this request.
+ */
+ seqno = i915_gem_request_global_seqno(req);
+ if (!seqno)
+ return false;
/* Before we do the heavier coherent read of the seqno,
* check the value (hopefully) in the CPU cacheline.
*/
- if (__i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req, seqno))
return true;
/* Ensure our read of the seqno is coherent so that we
* is woken.
*/
if (engine->irq_seqno_barrier &&
- rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
- cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
- struct task_struct *tsk;
+ test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
/* The ordering of irq_posted versus applying the barrier
* is crucial. The clearing of the current irq_posted must
* the seqno before we believe it coherent since they see
* irq_posted == false but we are still running).
*/
- rcu_read_lock();
- tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
- if (tsk && tsk != current)
+ spin_lock_irq(&b->irq_lock);
+ if (b->irq_wait && b->irq_wait->tsk != current)
/* Note that if the bottom-half is changed as we
* are sending the wake-up, the new bottom-half will
* be woken by whomever made the change. We only have
* to worry about when we steal the irq-posted for
* ourself.
*/
- wake_up_process(tsk);
- rcu_read_unlock();
+ wake_up_process(b->irq_wait->tsk);
+ spin_unlock_irq(&b->irq_lock);
- if (__i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req, seqno))
return true;
}
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
+static inline bool i915_gem_object_is_coherent(struct drm_i915_gem_object *obj)
+{
+ return (obj->cache_level != I915_CACHE_NONE ||
+ HAS_LLC(to_i915(obj->base.dev)));
+}
+
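
The helper says CPU access needs no explicit cache flush when the object is snooped (cache_level != I915_CACHE_NONE) or the platform shares its LLC with the GPU. A hypothetical call site, assuming the object's backing store sits in obj->mm.pages:

    /* sketch: flush only when the object is not coherent */
    if (!i915_gem_object_is_coherent(obj))
        drm_clflush_sg(obj->mm.pages);
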
#endif
#include "intel_drv.h"
#include "i915_vgpu.h"
+ #include <asm/iosf_mbi.h>
#include <linux/pm_runtime.h>
#define FORCEWAKE_ACK_TIMEOUT_MS 50
for_each_fw_domain_masked(d, fw_domains, dev_priv)
fw_domain_wait_ack(d);
+
+ dev_priv->uncore.fw_domains_active |= fw_domains;
}
static void
fw_domain_put(d);
fw_domain_posting_read(d);
}
+
+ dev_priv->uncore.fw_domains_active &= ~fw_domains;
+}
+
+static void
+vgpu_fw_domains_nop(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ /* The guest driver doesn't need to take care of forcewake. */
}
static void
if (WARN_ON(domain->wake_count == 0))
domain->wake_count++;
- if (--domain->wake_count == 0) {
+ if (--domain->wake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
- dev_priv->uncore.fw_domains_active &= ~domain->mask;
- }
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return HRTIMER_NORESTART;
}
- void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
- bool restore)
+ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+ bool restore)
{
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}
- void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
- bool restore_forcewake)
+ void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
- __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+ iosf_mbi_unregister_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
+ intel_uncore_forcewake_reset(dev_priv, false);
+ }
+
+ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
+ {
+ __intel_uncore_early_sanitize(dev_priv, true);
+ iosf_mbi_register_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
i915_check_and_clear_faults(dev_priv);
}
fw_domains &= ~domain->mask;
}
- if (fw_domains) {
+ if (fw_domains)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
- dev_priv->uncore.fw_domains_active |= fw_domains;
- }
}
/**
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
- assert_spin_locked(&dev_priv->uncore.lock);
+ lockdep_assert_held(&dev_priv->uncore.lock);
if (!dev_priv->uncore.funcs.force_wake_get)
return;
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
- assert_spin_locked(&dev_priv->uncore.lock);
+ lockdep_assert_held(&dev_priv->uncore.lock);
if (!dev_priv->uncore.funcs.force_wake_put)
return;
return entry->domains;
}
-static void
-intel_fw_table_check(struct drm_i915_private *dev_priv)
-{
- const struct intel_forcewake_range *ranges;
- unsigned int num_ranges;
- s32 prev;
- unsigned int i;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
- return;
-
- ranges = dev_priv->uncore.fw_domains_table;
- if (!ranges)
- return;
-
- num_ranges = dev_priv->uncore.fw_domains_table_entries;
-
- for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
- WARN_ON_ONCE(IS_GEN9(dev_priv) &&
- (prev + 1) != (s32)ranges->start);
- WARN_ON_ONCE(prev >= (s32)ranges->start);
- prev = ranges->start;
- WARN_ON_ONCE(prev >= (s32)ranges->end);
- prev = ranges->end;
- }
-}
-
#define GEN_FW_RANGE(s, e, d) \
{ .start = (s), .end = (e), .domains = (d) }
/* TODO: Other registers are not yet used */
};
-static void intel_shadow_table_check(void)
-{
- const i915_reg_t *reg = gen8_shadowed_regs;
- s32 prev;
- u32 offset;
- unsigned int i;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
- return;
-
- for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
- offset = i915_mmio_reg_offset(*reg);
- WARN_ON_ONCE(prev >= (s32)offset);
- prev = offset;
- }
-}
-
static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
u32 offset = i915_mmio_reg_offset(*reg);
fw_domain_arm_timer(domain);
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
- dev_priv->uncore.fw_domains_active |= fw_domains;
}
static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
___force_wake_auto(dev_priv, fw_domains);
}
-#define __gen6_read(x) \
-static u##x \
-gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- enum forcewake_domains fw_engine; \
- GEN6_READ_HEADER(x); \
- fw_engine = __gen6_reg_read_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
- GEN6_READ_FOOTER; \
-}
-
-#define __fwtable_read(x) \
+#define __gen_read(func, x) \
static u##x \
-fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __fwtable_reg_read_fw_domains(offset); \
+ fw_engine = __##func##_reg_read_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
+#define __gen6_read(x) __gen_read(gen6, x)
+#define __fwtable_read(x) __gen_read(fwtable, x)
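
The consolidation leaves the generated functions identical; only the macro that stamps them out changes. For example, __fwtable_read(32) still expands to roughly:

    static u32
    fwtable_read32(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) {
        enum forcewake_domains fw_engine;
        GEN6_READ_HEADER(32);
        fw_engine = __fwtable_reg_read_fw_domains(offset);
        if (fw_engine)
            __force_wake_auto(dev_priv, fw_engine);
        val = __raw_i915_read32(dev_priv, reg);
        GEN6_READ_FOOTER;
    }
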
#define __gen9_decoupled_read(x) \
static u##x \
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
-#define VGPU_READ_HEADER(x) \
- unsigned long irqflags; \
- u##x val = 0; \
- assert_rpm_device_not_suspended(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
-
-#define VGPU_READ_FOOTER \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
- trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
- return val
-
-#define __vgpu_read(x) \
-static u##x \
-vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- VGPU_READ_HEADER(x); \
- val = __raw_i915_read##x(dev_priv, reg); \
- VGPU_READ_FOOTER; \
-}
-
-__vgpu_read(8)
-__vgpu_read(16)
-__vgpu_read(32)
-__vgpu_read(64)
-
-#undef __vgpu_read
-#undef VGPU_READ_FOOTER
-#undef VGPU_READ_HEADER
-
#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_rpm_wakelock_held(dev_priv); \
GEN6_WRITE_FOOTER; \
}
-#define __gen8_write(x) \
-static void \
-gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
- enum forcewake_domains fw_engine; \
- GEN6_WRITE_HEADER; \
- fw_engine = __gen8_reg_write_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- __raw_i915_write##x(dev_priv, reg, val); \
- GEN6_WRITE_FOOTER; \
-}
-
-#define __fwtable_write(x) \
+#define __gen_write(func, x) \
static void \
-fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __fwtable_reg_write_fw_domains(offset); \
+ fw_engine = __##func##_reg_write_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
+#define __gen8_write(x) __gen_write(gen8, x)
+#define __fwtable_write(x) __gen_write(fwtable, x)
#define __gen9_decoupled_write(x) \
static void \
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
-#define VGPU_WRITE_HEADER \
- unsigned long irqflags; \
- trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- assert_rpm_device_not_suspended(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
-
-#define VGPU_WRITE_FOOTER \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
-
-#define __vgpu_write(x) \
-static void vgpu_write##x(struct drm_i915_private *dev_priv, \
- i915_reg_t reg, u##x val, bool trace) { \
- VGPU_WRITE_HEADER; \
- __raw_i915_write##x(dev_priv, reg, val); \
- VGPU_WRITE_FOOTER; \
-}
-
-__vgpu_write(8)
-__vgpu_write(16)
-__vgpu_write(32)
-
-#undef __vgpu_write
-#undef VGPU_WRITE_FOOTER
-#undef VGPU_WRITE_HEADER
-
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
FORCEWAKE, FORCEWAKE_ACK);
}
+ if (intel_vgpu_active(dev_priv)) {
+ dev_priv->uncore.funcs.force_wake_get = vgpu_fw_domains_nop;
+ dev_priv->uncore.funcs.force_wake_put = vgpu_fw_domains_nop;
+ }
+
/* All future platforms are expected to require complex power gating */
WARN_ON(dev_priv->uncore.fw_domains == 0);
}
dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}
+ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+ {
+ struct drm_i915_private *dev_priv = container_of(nb,
+ struct drm_i915_private, uncore.pmic_bus_access_nb);
+
+ switch (action) {
+ case MBI_PMIC_BUS_ACCESS_BEGIN:
+ /*
+ * Wake all forcewake domains now, so that we don't need a
+ * forcewake later. On the systems where this notifier is
+ * called, a later forcewake would require the punit to access
+ * the shared pmic i2c bus, which will be busy after this
+ * notification, leading to:
+ * "render: timed out waiting for forcewake ack request."
+ * errors.
+ */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ break;
+ case MBI_PMIC_BUS_ACCESS_END:
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ break;
+ }
+
+ return NOTIFY_OK;
+ }
+
void intel_uncore_init(struct drm_i915_private *dev_priv)
{
i915_check_vgpu(dev_priv);
__intel_uncore_early_sanitize(dev_priv, false);
dev_priv->uncore.unclaimed_mmio_check = 1;
+ dev_priv->uncore.pmic_bus_access_nb.notifier_call =
+ i915_pmic_bus_access_notifier;
switch (INTEL_INFO(dev_priv)->gen) {
default:
break;
}
- intel_fw_table_check(dev_priv);
- if (INTEL_GEN(dev_priv) >= 8)
- intel_shadow_table_check();
-
- if (intel_vgpu_active(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
- ASSIGN_READ_MMIO_VFUNCS(vgpu);
- }
-
+ iosf_mbi_register_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
+
i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
+ iosf_mbi_unregister_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
+
/* Paranoia: make sure we have disabled everything before we exit. */
intel_uncore_sanitize(dev_priv);
intel_uncore_forcewake_reset(dev_priv, false);
return fw_domains;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_uncore.c"
+#endif
{
u32 value;
- if (dev->accessor_flags & ACCESS_16BIT)
+ if (dev->flags & ACCESS_16BIT)
value = readw_relaxed(dev->base + offset) |
(readw_relaxed(dev->base + offset + 2) << 16);
else
value = readl_relaxed(dev->base + offset);
- if (dev->accessor_flags & ACCESS_SWAP)
+ if (dev->flags & ACCESS_SWAP)
return swab32(value);
else
return value;
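
On ACCESS_16BIT hardware a 32-bit register is reconstructed from two 16-bit halves, low half first, and ACCESS_SWAP then byte-swaps the whole word. A worked example of the two transformations:

    u32 lo = 0x5678, hi = 0x1234;        /* readw at offset, offset + 2 */
    u32 value = lo | (hi << 16);         /* 0x12345678 */
    u32 swapped = swab32(value);         /* 0x78563412 with ACCESS_SWAP */
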
static void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
{
- if (dev->accessor_flags & ACCESS_SWAP)
+ if (dev->flags & ACCESS_SWAP)
b = swab32(b);
- if (dev->accessor_flags & ACCESS_16BIT) {
+ if (dev->flags & ACCESS_16BIT) {
writew_relaxed((u16)b, dev->base + offset);
writew_relaxed((u16)(b >> 16), dev->base + offset + 2);
} else {
reg = dw_readl(dev, DW_IC_COMP_TYPE);
if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
/* Configure register endianess access */
- dev->accessor_flags |= ACCESS_SWAP;
+ dev->flags |= ACCESS_SWAP;
} else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
/* Configure register access mode 16bit */
- dev->accessor_flags |= ACCESS_16BIT;
+ dev->flags |= ACCESS_16BIT;
} else if (reg != DW_IC_COMP_TYPE_VALUE) {
dev_err(dev->dev, "Unknown Synopsys component type: "
"0x%08x\n", reg);
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
- u32 ic_tar = 0;
+ u32 ic_con, ic_tar = 0;
/* Disable the adapter */
__i2c_dw_enable_and_wait(dev, false);
/* if the slave address is ten bit address, enable 10BITADDR */
- if (dev->dynamic_tar_update_enabled) {
+ ic_con = dw_readl(dev, DW_IC_CON);
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
+ ic_con |= DW_IC_CON_10BITADDR_MASTER;
/*
* If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
- * mode has to be enabled via bit 12 of IC_TAR register,
- * otherwise bit 4 of IC_CON is used.
+ * mode has to be enabled via bit 12 of the IC_TAR register.
+ * We always set it because I2C_DYNAMIC_TAR_UPDATE can't be
+ * detected from the registers.
*/
- if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
- ic_tar = DW_IC_TAR_10BITADDR_MASTER;
+ ic_tar = DW_IC_TAR_10BITADDR_MASTER;
} else {
- u32 ic_con = dw_readl(dev, DW_IC_CON);
-
- if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
- ic_con |= DW_IC_CON_10BITADDR_MASTER;
- else
- ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
- dw_writel(dev, ic_con, DW_IC_CON);
+ ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
}
+ dw_writel(dev, ic_con, DW_IC_CON);
+
/*
* Set the slave (target) address and enable 10-bit addressing mode
* if applicable.
return dev->functionality;
}
-static struct i2c_algorithm i2c_dw_algo = {
+static const struct i2c_algorithm i2c_dw_algo = {
.master_xfer = i2c_dw_xfer,
.functionality = i2c_dw_func,
};
tx_aborted:
if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
complete(&dev->cmd_complete);
- else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
+ else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
/* workaround to trigger pending interrupt */
stat = dw_readl(dev, DW_IC_INTR_MASK);
i2c_dw_disable_int(dev);
{
struct i2c_adapter *adap = &dev->adapter;
int r;
- u32 reg;
init_completion(&dev->cmd_complete);
if (r)
return r;
- r = i2c_dw_acquire_lock(dev);
- if (r)
- return r;
-
- /*
- * Test if dynamic TAR update is enabled in this controller by writing
- * to IC_10BITADDR_MASTER field in IC_CON: when it is enabled this
- * field is read-only so it should not succeed
- */
- reg = dw_readl(dev, DW_IC_CON);
- dw_writel(dev, reg ^ DW_IC_CON_10BITADDR_MASTER, DW_IC_CON);
-
- if ((dw_readl(dev, DW_IC_CON) & DW_IC_CON_10BITADDR_MASTER) ==
- (reg & DW_IC_CON_10BITADDR_MASTER)) {
- dev->dynamic_tar_update_enabled = true;
- dev_dbg(dev->dev, "Dynamic TAR update enabled");
- }
-
- i2c_dw_release_lock(dev);
-
snprintf(adap->name, sizeof(adap->name),
"Synopsys DesignWare I2C adapter");
adap->retries = 3;
*/
#include <linux/i2c.h>
+ #include <linux/pm_qos.h>
#define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \
I2C_FUNC_SMBUS_BYTE | \
* @fp_lcnt: fast plus LCNT value
* @hs_hcnt: high speed HCNT value
* @hs_lcnt: high speed LCNT value
+ * @pm_qos: pm_qos_request used while holding a hardware lock on the bus
* @acquire_lock: function to acquire a hardware lock on the bus
* @release_lock: function to release a hardware lock on the bus
* @pm_runtime_disabled: true if pm runtime is disabled
unsigned int status;
u32 abort_source;
int irq;
- u32 accessor_flags;
+ u32 flags;
struct i2c_adapter adapter;
u32 functionality;
u32 master_cfg;
u16 fp_lcnt;
u16 hs_hcnt;
u16 hs_lcnt;
+ struct pm_qos_request pm_qos;
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_runtime_disabled;
- bool dynamic_tar_update_enabled;
};
#define ACCESS_SWAP 0x00000001
#define ACCESS_16BIT 0x00000002
#define ACCESS_INTR_MASK 0x00000004
+ #define MODEL_CHERRYTRAIL 0x00000100
+
extern int i2c_dw_init(struct dw_i2c_dev *dev);
extern void i2c_dw_disable(struct dw_i2c_dev *dev);
extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
extern int i2c_dw_probe(struct dw_i2c_dev *dev);
#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL)
- extern int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev);
+ extern int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev);
+ extern void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev);
#else
- static inline int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) { return 0; }
+ static inline int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { return 0; }
+ static inline void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev) {}
#endif