intel_wakeref_t wakeref;
unsigned long freed = 0;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
unsigned long flags;
freed_pages = 0;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
MAX_SCHEDULE_TIMEOUT))
goto out;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
}
}
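Each of the shrinker hunks above converts the same pattern: a runtime-pm wakeref is held across the call to i915_gem_shrink, now taken through the embedded runtime_pm struct rather than the i915 device. A minimal sketch of the converted shape, built only from the calls visible above (the shrink flag lists are abbreviated here just as they are in the hunks):

static unsigned long example_shrink_all(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	unsigned long freed = 0;

	/* keep the device awake for the duration of the shrink; the
	 * wakeref is released automatically when the scope exits */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed = i915_gem_shrink(i915, -1UL, NULL,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND);

	return freed;
}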
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
}
err = 0;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
}
err = 0;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
intel_wakeref_t wakeref;
err = 0;
- with_intel_runtime_pm(ce->engine->i915, wakeref)
+ with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
err = ce->ops->pin(ce);
if (err)
goto err;
intel_wakeref_t wakeref;
mutex_lock(&error->wedge_mutex);
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
__i915_gem_set_wedged(i915);
mutex_unlock(&error->wedge_mutex);
}
if (i915_terminally_wedged(i915))
return -EIO; /* we're long past hope of a successful reset */
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = i915_subtests(tests, i915);
return err;
GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(engine->i915, wakeref)
+ with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
kernel_context_close(ctx);
if (err)
goto out;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = reset(engine);
igt_spinner_end(&spin);
intel_wakeref_t wakeref;
gpu = NULL;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
gpu = i915_capture_gpu_state(i915);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
return 0;
}
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
for_each_engine(engine, dev_priv, id)
acthd[id] = intel_engine_get_active_head(engine);
intel_wakeref_t wakeref;
int err = -ENODEV;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
err = vlv_drpc_info(m);
else if (INTEL_GEN(dev_priv) >= 6)
if (!IS_GEN(i915, 5))
return -ENODEV;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
unsigned long temp, chipset, gfx;
temp = i915_mch_val(i915);
u32 act_freq = rps->cur_freq;
intel_wakeref_t wakeref;
- with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
+ with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_punit_get(dev_priv);
act_freq = vlv_punit_read(dev_priv,
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->huc.fw, &p);
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
return 0;
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->guc.fw, &p);
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 tmp = I915_READ(GUC_STATUS);
u32 i;
return -ENODEV;
units = (power & 0x1f00) >> 8;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
power = I915_READ(MCH_SECP_NRG_STTS);
power = (1000000 * power) >> units; /* convert to uJ */
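The conversion above decodes a units exponent from bits 12:8 of the power register and scales the raw counter to microjoules: one raw count is 1/2^units joules. A standalone sketch of the same arithmetic with a made-up reading (the helper name is illustrative):

static u64 raw_energy_to_uJ(u64 raw, unsigned int units)
{
	/* raw counts are 1/2^units J; e.g. units = 14, raw = 0x4000:
	 * (16384 * 1000000) >> 14 = 1000000 uJ = 1 J */
	return (1000000 * raw) >> units;
}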
if (ret < 0)
return ret;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (!dev_priv->ipc_enabled && enable)
DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
dev_priv->wm.distrust_bios_wm = true;
if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
return -ENODEV;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
return -EINVAL;
DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 snpcr;
/* Update the cache sharing policy here as well */
sseu.max_eus_per_subslice =
RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (IS_CHERRYVIEW(dev_priv))
cherryview_sseu_device_status(dev_priv, &sseu);
else if (IS_BROADWELL(dev_priv))
i915_gem_chipset_flush(dev_priv);
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
struct intel_uncore *uncore = &dev_priv->uncore;
spin_lock_irq(&uncore->lock);
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
struct drm_i915_private *i915 = vma->vm->i915;
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
if (flags & I915_VMA_GLOBAL_BIND) {
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
vma->vm->insert_entries(vma->vm, vma,
cache_level, pte_flags);
}
struct i915_address_space *vm = vma->vm;
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
vm->clear_range(vm, vma->node.start, vma->size);
}
if (dev_priv->gt.awake) {
intel_wakeref_t wakeref;
- with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
+ with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm,
+ wakeref) {
val = intel_uncore_read_notrace(&dev_priv->uncore,
GEN6_RPSTAT1);
val = intel_get_cagf(dev_priv, val);
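Unlike the unconditional form, with_intel_runtime_pm_if_in_use is built on intel_runtime_pm_get_if_in_use (see the macro hunk further down): if the device is runtime-suspended, no wakeref is taken and the body is skipped entirely, so sampling paths like the one above never wake idle hardware. A minimal sketch of the converted pattern, reusing the register read from this hunk (the helper name is illustrative):

static u32 sample_rpstat_if_awake(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;
	u32 val = 0;

	/* body runs only if a wakeref could be taken without waking
	 * a runtime-suspended device */
	with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref)
		val = intel_uncore_read_notrace(&dev_priv->uncore,
						GEN6_RPSTAT1);

	return val;
}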
intel_wakeref_t wakeref;
u64 res = 0;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
res = intel_rc6_residency_us(dev_priv, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
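The residency is sampled in microseconds while the wakeref is held and reported in milliseconds; DIV_ROUND_CLOSEST_ULL rounds to the nearest integer instead of truncating, e.g. 1500 us becomes (1500 + 500) / 1000 = 2 ms rather than 1 ms.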
* Generally device is expected to be active only at this
* time, so get/put should be really quick.
*/
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
guc_action_flush_log_complete(guc);
}
if (log->level == level)
goto out_unlock;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
ret = guc_action_control_log(guc,
GUC_LOG_LEVEL_IS_VERBOSE(level),
GUC_LOG_LEVEL_IS_ENABLED(level),
*/
flush_work(&log->relay.flush_work);
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
guc_action_flush_log(guc);
/* GuC would have updated log buffer by now, so capture it */
if (!HAS_HUC(dev_priv))
return -ENODEV;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
status = (I915_READ(huc->status.reg) & huc->status.mask) ==
huc->status.value;
intel_wakeref_t wakeref;
int ret = 0;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 hw_level;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
if (!IS_GEN(dev_priv, 5))
return 0;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
spin_lock_irq(&mchdev_lock);
val = __i915_chipset_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
if (!IS_GEN(dev_priv, 5))
return;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
spin_lock_irq(&mchdev_lock);
__i915_update_gfx_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
if (!IS_GEN(dev_priv, 5))
return 0;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
spin_lock_irq(&mchdev_lock);
val = __i915_gfx_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
if (!i915)
return 0;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
spin_lock_irq(&mchdev_lock);
chipset_val = __i915_chipset_val(i915);
graphics_val = __i915_gfx_val(i915);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
-#define with_intel_runtime_pm(i915, wf) \
- for ((wf) = intel_runtime_pm_get(&(i915)->runtime_pm); (wf); \
- intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0)
+#define with_intel_runtime_pm(rpm, wf) \
+ for ((wf) = intel_runtime_pm_get(rpm); (wf); \
+ intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
-#define with_intel_runtime_pm_if_in_use(i915, wf) \
- for ((wf) = intel_runtime_pm_get_if_in_use(&(i915)->runtime_pm); (wf); \
- intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0)
+#define with_intel_runtime_pm_if_in_use(rpm, wf) \
+ for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
+ intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
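The rewritten macros keep the single-iteration for-loop idiom: the acquire seeds the wakeref, the loop condition runs the body once while the wakeref is nonzero (for the _if_in_use variant, a failed acquire yields zero and the body is skipped), and the increment expression releases the wakeref and zeroes it so the loop terminates. This is also why call sites above work both with and without braces. The only interface change is the first parameter, from the i915 device to the intel_runtime_pm struct it embeds. A minimal before/after sketch of a caller:

static void example_caller(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/* old: with_intel_runtime_pm(i915, wakeref) { ... } */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* device guaranteed awake within this scope */
	}
}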
if (!intel_guc_is_loaded(guc))
return;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
intel_uc_runtime_suspend(i915);
}
flags = reg->offset & (entry->size - 1);
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
reg->val = intel_uncore_read64_2x32(uncore,
entry->offset_ldw,
{
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
i915_gem_suspend_gtt_mappings(i915);
i915_gem_suspend_late(i915);
}
{
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
i915_gem_suspend_gtt_mappings(i915);
i915_gem_freeze(i915);
* Both suspend and hibernate follow the same wakeup path and assume
* that runtime-pm just works.
*/
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
intel_gt_sanitize(i915, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
if (!i915)
return -ENOMEM;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = i915_subtests(tests, i915);
drm_dev_put(&i915->drm);