From: Daniel Vetter Date: Mon, 13 Mar 2017 08:26:06 +0000 (+0100) Subject: Merge tag 'topic/designware-baytrail-2017-03-02' of git://anongit.freedesktop.org... X-Git-Tag: v5.15~11060^2~31^2~73 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=f103560cf7c2abc10283dd0ba9889f3de5b7ac7e;hp=-c;p=platform%2Fkernel%2Flinux-starfive.git Merge tag 'topic/designware-baytrail-2017-03-02' of git://anongit.freedesktop.org/git/drm-intel into drm-intel-next-queued Baytrail PMIC vs. PMU race fixes from Hans de Goede This time the right version (v4), with the compile fix. Signed-off-by: Daniel Vetter --- f103560cf7c2abc10283dd0ba9889f3de5b7ac7e diff --combined drivers/gpu/drm/i915/Kconfig index 1ae0bb9,88e60c9..a5cd5da --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@@ -19,7 -19,7 +19,8 @@@ config DRM_I91 select INPUT if ACPI select ACPI_VIDEO if ACPI select ACPI_BUTTON if ACPI + select SYNC_FILE + select IOSF_MBI help Choose this option if you have a system that has "Intel Graphics Media Accelerator" or "HD Graphics" integrated graphics, diff --combined drivers/gpu/drm/i915/i915_drv.c index 704dbcf,58eb308..b1e9027 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@@ -43,7 -43,6 +43,7 @@@ #include #include +#include #include #include "i915_drv.h" @@@ -249,7 -248,6 +249,7 @@@ static int i915_getparam(struct drm_dev case I915_PARAM_IRQ_ACTIVE: case I915_PARAM_ALLOW_BATCHBUFFER: case I915_PARAM_LAST_DISPATCH: + case I915_PARAM_HAS_EXEC_CONSTANTS: /* Reject all old ums/dri params. */ return -ENODEV; case I915_PARAM_CHIPSET_ID: @@@ -276,6 -274,9 +276,6 @@@ case I915_PARAM_HAS_BSD2: value = !!dev_priv->engine[VCS2]; break; - case I915_PARAM_HAS_EXEC_CONSTANTS: - value = INTEL_GEN(dev_priv) >= 4; - break; case I915_PARAM_HAS_LLC: value = HAS_LLC(dev_priv); break; @@@ -317,9 -318,10 +317,9 @@@ value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool; break; case I915_PARAM_HUC_STATUS: - /* The register is already force-woken. We dont need - * any rpm here - */ + intel_runtime_pm_get(dev_priv); value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; + intel_runtime_pm_put(dev_priv); break; case I915_PARAM_MMAP_GTT_VERSION: /* Though we've started our numbering from 1, and so class all @@@ -348,8 -350,6 +348,8 @@@ case I915_PARAM_HAS_EXEC_HANDLE_LUT: case I915_PARAM_HAS_COHERENT_PHYS_GTT: case I915_PARAM_HAS_EXEC_SOFTPIN: + case I915_PARAM_HAS_EXEC_ASYNC: + case I915_PARAM_HAS_EXEC_FENCE: /* For the time being all of these are always true; * if some supported hardware does not have one of these * features this value needs to be provided from @@@ -756,15 -756,6 +756,15 @@@ out_err return -ENOMEM; } +static void i915_engines_cleanup(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, i915, id) + kfree(engine); +} + static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) { destroy_workqueue(dev_priv->hotplug.dp_wq); @@@ -778,17 -769,10 +778,17 @@@ */ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) { - if (IS_HSW_EARLY_SDV(dev_priv) || - IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0)) + bool pre = false; + + pre |= IS_HSW_EARLY_SDV(dev_priv); + pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0); + pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST); + + if (pre) { DRM_ERROR("This is a pre-production stepping. 
" "It may not be fully functional.\n"); + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); + } } /** @@@ -824,7 -808,6 +824,7 @@@ static int i915_driver_init_early(struc spin_lock_init(&dev_priv->gpu_error.lock); mutex_init(&dev_priv->backlight_lock); spin_lock_init(&dev_priv->uncore.lock); + spin_lock_init(&dev_priv->mm.object_stat_lock); spin_lock_init(&dev_priv->mmio_flip_lock); spin_lock_init(&dev_priv->wm.dsparb_lock); @@@ -835,15 -818,12 +835,15 @@@ mutex_init(&dev_priv->pps_mutex); intel_uc_init_early(dev_priv); - i915_memcpy_init_early(dev_priv); + ret = intel_engines_init_early(dev_priv); + if (ret) + return ret; + ret = i915_workqueues_init(dev_priv); if (ret < 0) - return ret; + goto err_engines; /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev_priv); @@@ -872,8 -852,6 +872,8 @@@ err_workqueues: i915_workqueues_cleanup(dev_priv); +err_engines: + i915_engines_cleanup(dev_priv); return ret; } @@@ -886,7 -864,6 +886,7 @@@ static void i915_driver_cleanup_early(s i915_perf_fini(dev_priv); i915_gem_load_cleanup(dev_priv); i915_workqueues_cleanup(dev_priv); + i915_engines_cleanup(dev_priv); } static int i915_mmio_setup(struct drm_i915_private *dev_priv) @@@ -953,7 -930,6 +953,7 @@@ static int i915_driver_init_mmio(struc goto put_bridge; intel_uncore_init(dev_priv); + i915_gem_init_mmio(dev_priv); return 0; @@@ -991,7 -967,7 +991,7 @@@ static void intel_sanitize_options(stru DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores); - DRM_DEBUG_DRIVER("use GPU sempahores? %s\n", yesno(i915.semaphores)); + DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores)); } /** @@@ -1163,7 -1139,7 +1163,7 @@@ static void i915_driver_register(struc if (IS_GEN5(dev_priv)) intel_gpu_ips_init(dev_priv); - i915_audio_component_init(dev_priv); + intel_audio_init(dev_priv); /* * Some ports require correctly set-up hpd registers for detection to @@@ -1181,7 -1157,7 +1181,7 @@@ */ static void i915_driver_unregister(struct drm_i915_private *dev_priv) { - i915_audio_component_cleanup(dev_priv); + intel_audio_deinit(dev_priv); intel_gpu_ips_teardown(); acpi_video_unregister(); @@@ -1191,6 -1167,7 +1191,6 @@@ i915_teardown_sysfs(dev_priv); i915_guc_log_unregister(dev_priv); - i915_debugfs_unregister(dev_priv); drm_dev_unregister(&dev_priv->drm); i915_gem_shrinker_cleanup(dev_priv); @@@ -1209,15 -1186,11 +1209,15 @@@ */ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) { + const struct intel_device_info *match_info = + (struct intel_device_info *)ent->driver_data; struct drm_i915_private *dev_priv; int ret; - if (i915.nuclear_pageflip) - driver.driver_features |= DRIVER_ATOMIC; + /* Enable nuclear pageflip on ILK+, except vlv/chv */ + if (!i915.nuclear_pageflip && + (match_info->gen < 5 || match_info->has_gmch_display)) + driver.driver_features &= ~DRIVER_ATOMIC; ret = -ENOMEM; dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); @@@ -1225,7 -1198,8 +1225,7 @@@ ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev); if (ret) { DRM_DEV_ERROR(&pdev->dev, "allocation failed\n"); - kfree(dev_priv); - return ret; + goto out_free; } dev_priv->drm.pdev = pdev; @@@ -1233,7 -1207,7 +1233,7 @@@ ret = pci_enable_device(pdev); if (ret) - goto out_free_priv; + goto out_fini; pci_set_drvdata(pdev, &dev_priv->drm); @@@ -1297,11 -1271,9 +1297,11 @@@ out_runtime_pm_put i915_driver_cleanup_early(dev_priv); out_pci_disable: pci_disable_device(pdev); -out_free_priv: +out_fini: 
i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); - drm_dev_unref(&dev_priv->drm); + drm_dev_fini(&dev_priv->drm); +out_free: + kfree(dev_priv); return ret; } @@@ -1309,8 -1281,6 +1309,8 @@@ void i915_driver_unload(struct drm_devi { struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; + struct drm_modeset_acquire_ctx ctx; + int ret; intel_fbdev_fini(dev); @@@ -1319,24 -1289,6 +1319,24 @@@ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + drm_modeset_acquire_init(&ctx, 0); + while (1) { + ret = drm_modeset_lock_all_ctx(dev, &ctx); + if (!ret) + ret = drm_atomic_helper_disable_all(dev, &ctx); + + if (ret != -EDEADLK) + break; + + drm_modeset_backoff(&ctx); + } + + if (ret) + DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + intel_gvt_cleanup(dev_priv); i915_driver_unregister(dev_priv); @@@ -1366,7 -1318,7 +1366,7 @@@ /* Free error state after interrupts are fully disabled. */ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); - i915_destroy_error_state(dev_priv); + i915_reset_error_state(dev_priv); /* Flush any outstanding unpin_work. */ drain_workqueue(dev_priv->wq); @@@ -1382,16 -1334,8 +1382,16 @@@ i915_driver_cleanup_mmio(dev_priv); intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); +} + +static void i915_driver_release(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); i915_driver_cleanup_early(dev_priv); + drm_dev_fini(&dev_priv->drm); + + kfree(dev_priv); } static int i915_driver_open(struct drm_device *dev, struct drm_file *file) @@@ -1512,7 -1456,7 +1512,7 @@@ static int i915_drm_suspend(struct drm_ opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; intel_opregion_notify_adapter(dev_priv, opregion_target_state); - intel_uncore_forcewake_reset(dev_priv, false); + intel_uncore_suspend(dev_priv); intel_opregion_unregister(dev_priv); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); @@@ -1757,7 -1701,7 +1757,7 @@@ static int i915_drm_resume_early(struc DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", ret); - intel_uncore_early_sanitize(dev_priv, true); + intel_uncore_resume_early(dev_priv); if (IS_GEN9_LP(dev_priv)) { if (!dev_priv->suspended_to_idle) @@@ -1773,8 -1717,6 +1773,8 @@@ !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) intel_power_domains_init_hw(dev_priv, true); + i915_gem_sanitize(dev_priv); + enable_rpm_wakeref_asserts(dev_priv); out: @@@ -1846,7 -1788,7 +1846,7 @@@ void i915_reset(struct drm_i915_privat goto error; } - i915_gem_reset_finish(dev_priv); + i915_gem_reset(dev_priv); intel_overlay_reset(dev_priv); /* Ok, now get things going again... 
*/ @@@ -1872,7 -1814,6 +1872,7 @@@ i915_queue_hangcheck(dev_priv); wakeup: + i915_gem_reset_finish(dev_priv); enable_irq(dev_priv->drm.irq); wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS); return; @@@ -2402,7 -2343,7 +2402,7 @@@ static int intel_runtime_suspend(struc return ret; } - intel_uncore_forcewake_reset(dev_priv, false); + intel_uncore_suspend(dev_priv); enable_rpm_wakeref_asserts(dev_priv); WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); @@@ -2592,7 -2533,7 +2592,7 @@@ static const struct drm_ioctl_desc i915 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), @@@ -2634,8 -2575,7 +2634,8 @@@ static struct drm_driver driver = */ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | - DRIVER_RENDER | DRIVER_MODESET, + DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC, + .release = i915_driver_release, .open = i915_driver_open, .lastclose = i915_driver_lastclose, .preclose = i915_driver_preclose, @@@ -2664,7 -2604,3 +2664,7 @@@ .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/mock_drm.c" +#endif diff --combined drivers/gpu/drm/i915/i915_drv.h index f4d87d3,19a5188..7298b30 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@@ -79,8 -79,8 +79,8 @@@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20170123" -#define DRIVER_TIMESTAMP 1485156432 +#define DRIVER_DATE "20170306" +#define DRIVER_TIMESTAMP 1488785683 #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@@ -293,7 -293,6 +293,7 @@@ enum plane_id PLANE_PRIMARY, PLANE_SPRITE0, PLANE_SPRITE1, + PLANE_SPRITE2, PLANE_CURSOR, I915_MAX_PLANES, }; @@@ -344,11 -343,6 +344,11 @@@ enum intel_display_power_domain POWER_DOMAIN_PORT_DDI_C_LANES, POWER_DOMAIN_PORT_DDI_D_LANES, POWER_DOMAIN_PORT_DDI_E_LANES, + POWER_DOMAIN_PORT_DDI_A_IO, + POWER_DOMAIN_PORT_DDI_B_IO, + POWER_DOMAIN_PORT_DDI_C_IO, + POWER_DOMAIN_PORT_DDI_D_IO, + POWER_DOMAIN_PORT_DDI_E_IO, POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_PORT_OTHER, @@@ -390,8 -384,6 +390,8 @@@ enum hpd_pin #define for_each_hpd_pin(__pin) \ for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) +#define HPD_STORM_DEFAULT_THRESHOLD 5 + struct i915_hotplug { struct work_struct hotplug_work; @@@ -415,8 -407,6 +415,8 @@@ struct work_struct poll_init_work; bool poll_enabled; + unsigned int hpd_storm_threshold; + /* * if we get a HPD irq from DP and a HPD irq from non-DP * the non-DP HPD could block the workqueue on a mode config @@@ -489,8 -479,10 +489,8 @@@ &(dev)->mode_config.encoder_list, \ base.head) -#define for_each_intel_connector(dev, intel_connector) \ - list_for_each_entry(intel_connector, \ - &(dev)->mode_config.connector_list, \ - base.head) +#define for_each_intel_connector_iter(intel_connector, iter) \ + while ((intel_connector = 
to_intel_connector(drm_connector_list_iter_next(iter)))) #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ @@@ -502,35 -494,7 +502,35 @@@ #define for_each_power_domain(domain, mask) \ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ - for_each_if ((1 << (domain)) & (mask)) + for_each_if (BIT_ULL(domain) & (mask)) + +#define for_each_power_well(__dev_priv, __power_well) \ + for ((__power_well) = (__dev_priv)->power_domains.power_wells; \ + (__power_well) - (__dev_priv)->power_domains.power_wells < \ + (__dev_priv)->power_domains.power_well_count; \ + (__power_well)++) + +#define for_each_power_well_rev(__dev_priv, __power_well) \ + for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ + (__dev_priv)->power_domains.power_well_count - 1; \ + (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ + (__power_well)--) + +#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \ + for_each_power_well(__dev_priv, __power_well) \ + for_each_if ((__power_well)->domains & (__domain_mask)) + +#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \ + for_each_power_well_rev(__dev_priv, __power_well) \ + for_each_if ((__power_well)->domains & (__domain_mask)) + +#define for_each_intel_plane_in_state(__state, plane, plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->base.dev->mode_config.num_total_plane && \ + ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \ + (plane_state) = to_intel_plane_state((__state)->base.planes[__i].state), 1); \ + (__i)++) \ + for_each_if (plane_state) struct drm_i915_private; struct i915_mm_struct; @@@ -636,13 -600,9 +636,13 @@@ struct intel_initial_plane_config struct intel_crtc; struct intel_limit; struct dpll; +struct intel_cdclk_state; struct drm_i915_display_funcs { - int (*get_display_clock_speed)(struct drm_i915_private *dev_priv); + void (*get_cdclk)(struct drm_i915_private *dev_priv, + struct intel_cdclk_state *cdclk_state); + void (*set_cdclk)(struct drm_i915_private *dev_priv, + const struct intel_cdclk_state *cdclk_state); int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane); int (*compute_pipe_wm)(struct intel_crtc_state *cstate); int (*compute_intermediate_wm)(struct drm_device *dev, @@@ -657,6 -617,7 +657,6 @@@ int (*compute_global_watermarks)(struct drm_atomic_state *state); void (*update_wm)(struct intel_crtc *crtc); int (*modeset_calc_cdclk)(struct drm_atomic_state *state); - void (*modeset_commit_cdclk)(struct drm_atomic_state *state); /* Returns the active state of the crtc, and if the crtc is active, * fills out the pipe-config with the hw state. 
*/ bool (*get_pipe_config)(struct intel_crtc *, @@@ -675,8 -636,7 +675,8 @@@ struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode); void (*audio_codec_disable)(struct intel_encoder *encoder); - void (*fdi_link_train)(struct drm_crtc *crtc); + void (*fdi_link_train)(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state); void (*init_clock_gating)(struct drm_i915_private *dev_priv); int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, @@@ -762,6 -722,7 +762,7 @@@ struct intel_uncore const struct intel_forcewake_range *fw_domains_table; unsigned int fw_domains_table_entries; + struct notifier_block pmic_bus_access_nb; struct intel_uncore_funcs funcs; unsigned fifo_count; @@@ -896,7 -857,6 +897,7 @@@ enum intel_platform INTEL_BROXTON, INTEL_KABYLAKE, INTEL_GEMINILAKE, + INTEL_MAX_PLATFORMS }; struct intel_device_info { @@@ -931,7 -891,7 +932,7 @@@ struct intel_display_error_state; -struct drm_i915_error_state { +struct i915_gpu_state { struct kref ref; struct timeval time; struct timeval boottime; @@@ -941,20 -901,16 +942,20 @@@ char error_msg[128]; bool simulated; + bool awake; + bool wakelock; + bool suspended; int iommu; u32 reset_count; u32 suspend_count; struct intel_device_info device_info; + struct i915_params params; /* Generic register state */ u32 eir; u32 pgtbl_er; u32 ier; - u32 gtier[4]; + u32 gtier[4], ngtier; u32 ccid; u32 derrmr; u32 forcewake; @@@ -968,7 -924,6 +969,7 @@@ u32 gab_ctl; u32 gfx_mode; + u32 nfence; u64 fence[I915_MAX_NUM_FENCES]; struct intel_overlay_error_state *overlay; struct intel_display_error_state *display; @@@ -1016,16 -971,6 +1017,16 @@@ u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; struct intel_instdone instdone; + struct drm_i915_error_context { + char comm[TASK_COMM_LEN]; + pid_t pid; + u32 handle; + u32 hw_id; + int ban_score; + int active; + int guilty; + } context; + struct drm_i915_error_object { u64 gtt_offset; u64 gtt_size; @@@ -1059,6 -1004,10 +1060,6 @@@ u32 pp_dir_base; }; } vm_info; - - pid_t pid; - char comm[TASK_COMM_LEN]; - int context_bans; } engine[I915_NUM_ENGINES]; struct drm_i915_error_buffer { @@@ -1337,7 -1286,7 +1338,7 @@@ struct intel_gen6_power_mgmt u32 pm_iir; /* PM interrupt bits that should never be masked */ - u32 pm_intr_keep; + u32 pm_intrmsk_mbz; /* Frequencies are stored in potentially platform dependent multiples. * In other words, *_freq needs to be multiplied by X to be interesting. @@@ -1376,7 -1325,7 +1377,7 @@@ unsigned boosts; /* manual wa residency calculations */ - struct intel_rps_ei up_ei, down_ei; + struct intel_rps_ei ei; /* * Protects RPS/RC6 register access and PCU communication. 
@@@ -1447,7 -1396,7 +1448,7 @@@ struct i915_power_well int count; /* cached hw enabled state */ bool hw_enabled; - unsigned long domains; + u64 domains; /* unique identifier for this power well */ unsigned long id; /* @@@ -1508,7 -1457,7 +1509,7 @@@ struct i915_gem_mm struct work_struct free_work; /** Usable portion of the GTT for GEM */ - phys_addr_t stolen_base; /* limited to low memory (32-bit) */ + dma_addr_t stolen_base; /* limited to low memory (32-bit) */ /** PPGTT used for aliasing the PPGTT with the GTT */ struct i915_hw_ppgtt *aliasing_ppgtt; @@@ -1550,6 -1499,11 +1551,6 @@@ struct drm_i915_error_state_buf loff_t pos; }; -struct i915_error_state_file_priv { - struct drm_i915_private *i915; - struct drm_i915_error_state *error; -}; - #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */ #define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */ @@@ -1566,7 -1520,7 +1567,7 @@@ struct i915_gpu_error /* For reset and error_state handling. */ spinlock_t lock; /* Protected by the above dev->gpu_error.lock. */ - struct drm_i915_error_state *first_error; + struct i915_gpu_state *first_error; unsigned long missed_irq_rings; @@@ -2100,10 -2054,6 +2101,10 @@@ struct i915_oa_ops bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv); }; +struct intel_cdclk_state { + unsigned int cdclk, vco, ref; +}; + struct drm_i915_private { struct drm_device drm; @@@ -2114,6 -2064,8 +2115,6 @@@ const struct intel_device_info info; - int relative_constants_mode; - void __iomem *regs; struct intel_uncore uncore; @@@ -2206,7 -2158,13 +2207,7 @@@ unsigned int fsb_freq, mem_freq, is_ddr3; unsigned int skl_preferred_vco_freq; - unsigned int cdclk_freq, max_cdclk_freq; - - /* - * For reading holding any crtc lock is sufficient, - * for writing must hold all of them. - */ - unsigned int atomic_cdclk_freq; + unsigned int max_cdclk_freq; unsigned int max_dotclk_freq; unsigned int rawclk_freq; @@@ -2214,22 -2172,8 +2215,22 @@@ unsigned int czclk_freq; struct { - unsigned int vco, ref; - } cdclk_pll; + /* + * The current logical cdclk state. + * See intel_atomic_state.cdclk.logical + * + * For reading holding any crtc lock is sufficient, + * for writing must hold all of them. + */ + struct intel_cdclk_state logical; + /* + * The current actual cdclk state. + * See intel_atomic_state.cdclk.actual + */ + struct intel_cdclk_state actual; + /* The current hardware cdclk state */ + struct intel_cdclk_state hw; + } cdclk; /** * wq - Driver workqueue for GEM. @@@ -2516,12 -2460,6 +2517,12 @@@ /* Used to save the pipe-to-encoder mapping for audio */ struct intel_encoder *av_enc_map[I915_MAX_PIPES]; + /* necessary resource sharing with HDMI LPE audio driver. */ + struct { + struct platform_device *platdev; + int irq; + } lpe_audio; + /* * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch * will be rejected. Instead look for a better place. 
@@@ -2809,12 -2747,6 +2810,12 @@@ intel_info(const struct drm_i915_privat #define IS_KBL_REVID(dev_priv, since, until) \ (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until)) +#define GLK_REVID_A0 0x0 +#define GLK_REVID_A1 0x1 + +#define IS_GLK_REVID(dev_priv, since, until) \ + (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until)) + /* * The genX designation typically refers to the render engine, so render * capability related checks should use IS_GEN, while display and other checks @@@ -2830,9 -2762,8 +2831,9 @@@ #define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7))) #define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8))) -#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && INTEL_INFO(dev_priv)->is_lp) #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) +#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv)) +#define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv)) #define ENGINE_MASK(id) BIT(id) #define RENDER_RING ENGINE_MASK(RCS) @@@ -2874,7 -2805,9 +2875,7 @@@ /* WaRsDisableCoarsePowerGating:skl,bxt */ #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ - (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \ - IS_SKL_GT3(dev_priv) || \ - IS_SKL_GT4(dev_priv)) + (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) /* * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts @@@ -3014,9 -2947,6 +3015,9 @@@ extern unsigned long i915_gfx_val(struc extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); +int intel_engines_init_early(struct drm_i915_private *dev_priv); +int intel_engines_init(struct drm_i915_private *dev_priv); + /* intel_hotplug.c */ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask); @@@ -3055,14 -2985,12 +3056,12 @@@ int intel_irq_install(struct drm_i915_p void intel_irq_uninstall(struct drm_i915_private *dev_priv); extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv); - extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, - bool restore_forcewake); extern void intel_uncore_init(struct drm_i915_private *dev_priv); extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); extern void intel_uncore_fini(struct drm_i915_private *dev_priv); - extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, - bool restore); + extern void intel_uncore_suspend(struct drm_i915_private *dev_priv); + extern void intel_uncore_resume_early(struct drm_i915_private *dev_priv); const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, enum forcewake_domains domains); @@@ -3194,7 -3122,6 +3193,7 @@@ int i915_gem_get_aperture_ioctl(struct struct drm_file *file_priv); int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +void i915_gem_sanitize(struct drm_i915_private *i915); int i915_gem_load_init(struct drm_i915_private *dev_priv); void i915_gem_load_cleanup(struct drm_i915_private *dev_priv); void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); @@@ -3407,20 -3334,18 +3406,20 @@@ static inline u32 i915_reset_count(stru } int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); +void i915_gem_reset(struct drm_i915_private *dev_priv); void i915_gem_reset_finish(struct drm_i915_private *dev_priv); 
void i915_gem_set_wedged(struct drm_i915_private *dev_priv); -void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); + +void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); -int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, - unsigned int flags); +int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, + unsigned int flags); int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); void i915_gem_resume(struct drm_i915_private *dev_priv); -int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); +int i915_gem_fault(struct vm_fault *vmf); int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, long timeout, @@@ -3596,10 -3521,12 +3595,10 @@@ u32 i915_gem_fence_alignment(struct drm /* i915_debugfs.c */ #ifdef CONFIG_DEBUG_FS int i915_debugfs_register(struct drm_i915_private *dev_priv); -void i915_debugfs_unregister(struct drm_i915_private *dev_priv); int i915_debugfs_connector_add(struct drm_connector *connector); void intel_display_crc_init(struct drm_i915_private *dev_priv); #else static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;} -static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {} static inline int i915_debugfs_connector_add(struct drm_connector *connector) { return 0; } static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {} @@@ -3611,7 -3538,7 +3610,7 @@@ __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, - const struct i915_error_state_file_priv *error); + const struct i915_gpu_state *gpu); int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, struct drm_i915_private *i915, size_t count, loff_t pos); @@@ -3620,28 -3547,13 +3619,28 @@@ static inline void i915_error_state_buf { kfree(eb->buf); } + +struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915); void i915_capture_error_state(struct drm_i915_private *dev_priv, u32 engine_mask, const char *error_msg); -void i915_error_state_get(struct drm_device *dev, - struct i915_error_state_file_priv *error_priv); -void i915_error_state_put(struct i915_error_state_file_priv *error_priv); -void i915_destroy_error_state(struct drm_i915_private *dev_priv); + +static inline struct i915_gpu_state * +i915_gpu_state_get(struct i915_gpu_state *gpu) +{ + kref_get(&gpu->ref); + return gpu; +} + +void __i915_gpu_state_free(struct kref *kref); +static inline void i915_gpu_state_put(struct i915_gpu_state *gpu) +{ + if (gpu) + kref_put(&gpu->ref, __i915_gpu_state_free); +} + +struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); +void i915_reset_error_state(struct drm_i915_private *i915); #else @@@ -3651,13 -3563,7 +3650,13 @@@ static inline void i915_capture_error_s { } -static inline void i915_destroy_error_state(struct drm_i915_private *dev_priv) +static inline struct i915_gpu_state * +i915_first_error_state(struct drm_i915_private *i915) +{ + return NULL; +} + +static inline void i915_reset_error_state(struct drm_i915_private *i915) { } @@@ -3690,14 -3596,6 +3689,14 @@@ extern int i915_restore_state(struct dr void i915_setup_sysfs(struct 
drm_i915_private *dev_priv); void i915_teardown_sysfs(struct drm_i915_private *dev_priv); +/* intel_lpe_audio.c */ +int intel_lpe_audio_init(struct drm_i915_private *dev_priv); +void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv); +void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv); +void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, + void *eld, int port, int pipe, int tmds_clk_speed, + bool dp_output, int link_rate); + /* intel_i2c.c */ extern int intel_setup_gmbus(struct drm_i915_private *dev_priv); extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv); @@@ -3797,7 -3695,7 +3796,7 @@@ extern void i915_redisable_vga(struct d extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv); -extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); +extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val); extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); @@@ -3813,6 -3711,7 +3812,6 @@@ extern void intel_overlay_print_error_s extern struct intel_display_error_state * intel_display_capture_error_state(struct drm_i915_private *dev_priv); extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, - struct drm_i915_private *dev_priv, struct intel_display_error_state *error); int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); @@@ -3822,7 -3721,7 +3821,7 @@@ int skl_pcode_request(struct drm_i915_p /* intel_sideband.c */ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); -void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); +int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg); void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val); @@@ -4041,34 -3940,14 +4040,34 @@@ wait_remaining_ms_from_jiffies(unsigne } static inline bool -__i915_request_irq_complete(struct drm_i915_gem_request *req) +__i915_request_irq_complete(const struct drm_i915_gem_request *req) { struct intel_engine_cs *engine = req->engine; + u32 seqno; + + /* Note that the engine may have wrapped around the seqno, and + * so our request->global_seqno will be ahead of the hardware, + * even though it completed the request before wrapping. We catch + * this by kicking all the waiters before resetting the seqno + * in hardware, and also signal the fence. + */ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags)) + return true; + + /* The request was dequeued before we were awoken. We check after + * inspecting the hw to confirm that this was the same request + * that generated the HWS update. The memory barriers within + * the request execution are sufficient to ensure that a check + * after reading the value from hw matches this request. + */ + seqno = i915_gem_request_global_seqno(req); + if (!seqno) + return false; /* Before we do the heavier coherent read of the seqno, * check the value (hopefully) in the CPU cacheline. */ - if (__i915_gem_request_completed(req)) + if (__i915_gem_request_completed(req, seqno)) return true; /* Ensure our read of the seqno is coherent so that we @@@ -4083,8 -3962,9 +4082,8 @@@ * is woken. 
*/ if (engine->irq_seqno_barrier && - rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current && - cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) { - struct task_struct *tsk; + test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) { + struct intel_breadcrumbs *b = &engine->breadcrumbs; /* The ordering of irq_posted versus applying the barrier * is crucial. The clearing of the current irq_posted must @@@ -4106,18 -3986,19 +4105,18 @@@ * the seqno before we believe it coherent since they see * irq_posted == false but we are still running). */ - rcu_read_lock(); - tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh); - if (tsk && tsk != current) + spin_lock_irq(&b->irq_lock); + if (b->irq_wait && b->irq_wait->tsk != current) /* Note that if the bottom-half is changed as we * are sending the wake-up, the new bottom-half will * be woken by whomever made the change. We only have * to worry about when we steal the irq-posted for * ourself. */ - wake_up_process(tsk); - rcu_read_unlock(); + wake_up_process(b->irq_wait->tsk); + spin_unlock_irq(&b->irq_lock); - if (__i915_gem_request_completed(req)) + if (__i915_gem_request_completed(req, seqno)) return true; } @@@ -4148,10 -4029,4 +4147,10 @@@ int remap_io_mapping(struct vm_area_str unsigned long addr, unsigned long pfn, unsigned long size, struct io_mapping *iomap); +static inline bool i915_gem_object_is_coherent(struct drm_i915_gem_object *obj) +{ + return (obj->cache_level != I915_CACHE_NONE || + HAS_LLC(to_i915(obj->base.dev))); +} + #endif diff --combined drivers/gpu/drm/i915/intel_uncore.c index f1c0da0,6106a54..71b9b38 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@@ -25,6 -25,7 +25,7 @@@ #include "intel_drv.h" #include "i915_vgpu.h" + #include #include #define FORCEWAKE_ACK_TIMEOUT_MS 50 @@@ -119,8 -120,6 +120,8 @@@ fw_domains_get(struct drm_i915_private for_each_fw_domain_masked(d, fw_domains, dev_priv) fw_domain_wait_ack(d); + + dev_priv->uncore.fw_domains_active |= fw_domains; } static void @@@ -132,15 -131,6 +133,15 @@@ fw_domains_put(struct drm_i915_private fw_domain_put(d); fw_domain_posting_read(d); } + + dev_priv->uncore.fw_domains_active &= ~fw_domains; +} + +static void +vgpu_fw_domains_nop(struct drm_i915_private *dev_priv, + enum forcewake_domains fw_domains) +{ + /* Guest driver doesn't need to takes care forcewake. 
*/ } static void @@@ -251,16 -241,18 +252,16 @@@ intel_uncore_fw_release_timer(struct hr if (WARN_ON(domain->wake_count == 0)) domain->wake_count++; - if (--domain->wake_count == 0) { + if (--domain->wake_count == 0) dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask); - dev_priv->uncore.fw_domains_active &= ~domain->mask; - } spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); return HRTIMER_NORESTART; } - void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, - bool restore) + static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, + bool restore) { unsigned long irqflags; struct intel_uncore_forcewake_domain *domain; @@@ -436,10 -428,18 +437,18 @@@ static void __intel_uncore_early_saniti intel_uncore_forcewake_reset(dev_priv, restore_forcewake); } - void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, - bool restore_forcewake) + void intel_uncore_suspend(struct drm_i915_private *dev_priv) { - __intel_uncore_early_sanitize(dev_priv, restore_forcewake); + iosf_mbi_unregister_pmic_bus_access_notifier( + &dev_priv->uncore.pmic_bus_access_nb); + intel_uncore_forcewake_reset(dev_priv, false); + } + + void intel_uncore_resume_early(struct drm_i915_private *dev_priv) + { + __intel_uncore_early_sanitize(dev_priv, true); + iosf_mbi_register_pmic_bus_access_notifier( + &dev_priv->uncore.pmic_bus_access_nb); i915_check_and_clear_faults(dev_priv); } @@@ -463,8 -463,10 +472,8 @@@ static void __intel_uncore_forcewake_ge fw_domains &= ~domain->mask; } - if (fw_domains) { + if (fw_domains) dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); - dev_priv->uncore.fw_domains_active |= fw_domains; - } } /** @@@ -506,7 -508,7 +515,7 @@@ void intel_uncore_forcewake_get(struct void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) { - assert_spin_locked(&dev_priv->uncore.lock); + lockdep_assert_held(&dev_priv->uncore.lock); if (!dev_priv->uncore.funcs.force_wake_get) return; @@@ -564,7 -566,7 +573,7 @@@ void intel_uncore_forcewake_put(struct void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) { - assert_spin_locked(&dev_priv->uncore.lock); + lockdep_assert_held(&dev_priv->uncore.lock); if (!dev_priv->uncore.funcs.force_wake_put) return; @@@ -642,6 -644,33 +651,6 @@@ find_fw_domain(struct drm_i915_private return entry->domains; } -static void -intel_fw_table_check(struct drm_i915_private *dev_priv) -{ - const struct intel_forcewake_range *ranges; - unsigned int num_ranges; - s32 prev; - unsigned int i; - - if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG)) - return; - - ranges = dev_priv->uncore.fw_domains_table; - if (!ranges) - return; - - num_ranges = dev_priv->uncore.fw_domains_table_entries; - - for (i = 0, prev = -1; i < num_ranges; i++, ranges++) { - WARN_ON_ONCE(IS_GEN9(dev_priv) && - (prev + 1) != (s32)ranges->start); - WARN_ON_ONCE(prev >= (s32)ranges->start); - prev = ranges->start; - WARN_ON_ONCE(prev >= (s32)ranges->end); - prev = ranges->end; - } -} - #define GEN_FW_RANGE(s, e, d) \ { .start = (s), .end = (e), .domains = (d) } @@@ -680,6 -709,23 +689,6 @@@ static const i915_reg_t gen8_shadowed_r /* TODO: Other registers are not yet used */ }; -static void intel_shadow_table_check(void) -{ - const i915_reg_t *reg = gen8_shadowed_regs; - s32 prev; - u32 offset; - unsigned int i; - - if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG)) - return; - - for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) { - offset = 
i915_mmio_reg_offset(*reg); - WARN_ON_ONCE(prev >= (s32)offset); - prev = offset; - } -} - static int mmio_reg_cmp(u32 key, const i915_reg_t *reg) { u32 offset = i915_mmio_reg_offset(*reg); @@@ -931,6 -977,7 +940,6 @@@ static noinline void ___force_wake_auto fw_domain_arm_timer(domain); dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); - dev_priv->uncore.fw_domains_active |= fw_domains; } static inline void __force_wake_auto(struct drm_i915_private *dev_priv, @@@ -947,19 -994,29 +956,19 @@@ ___force_wake_auto(dev_priv, fw_domains); } -#define __gen6_read(x) \ -static u##x \ -gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ - enum forcewake_domains fw_engine; \ - GEN6_READ_HEADER(x); \ - fw_engine = __gen6_reg_read_fw_domains(offset); \ - if (fw_engine) \ - __force_wake_auto(dev_priv, fw_engine); \ - val = __raw_i915_read##x(dev_priv, reg); \ - GEN6_READ_FOOTER; \ -} - -#define __fwtable_read(x) \ +#define __gen_read(func, x) \ static u##x \ -fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ +func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_READ_HEADER(x); \ - fw_engine = __fwtable_reg_read_fw_domains(offset); \ + fw_engine = __##func##_reg_read_fw_domains(offset); \ if (fw_engine) \ __force_wake_auto(dev_priv, fw_engine); \ val = __raw_i915_read##x(dev_priv, reg); \ GEN6_READ_FOOTER; \ } +#define __gen6_read(x) __gen_read(gen6, x) +#define __fwtable_read(x) __gen_read(fwtable, x) #define __gen9_decoupled_read(x) \ static u##x \ @@@ -997,6 -1054,34 +1006,6 @@@ __gen6_read(64 #undef GEN6_READ_FOOTER #undef GEN6_READ_HEADER -#define VGPU_READ_HEADER(x) \ - unsigned long irqflags; \ - u##x val = 0; \ - assert_rpm_device_not_suspended(dev_priv); \ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) - -#define VGPU_READ_FOOTER \ - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ - trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ - return val - -#define __vgpu_read(x) \ -static u##x \ -vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ - VGPU_READ_HEADER(x); \ - val = __raw_i915_read##x(dev_priv, reg); \ - VGPU_READ_FOOTER; \ -} - -__vgpu_read(8) -__vgpu_read(16) -__vgpu_read(32) -__vgpu_read(64) - -#undef __vgpu_read -#undef VGPU_READ_FOOTER -#undef VGPU_READ_HEADER - #define GEN2_WRITE_HEADER \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ assert_rpm_wakelock_held(dev_priv); \ @@@ -1060,19 -1145,29 +1069,19 @@@ gen6_write##x(struct drm_i915_private * GEN6_WRITE_FOOTER; \ } -#define __gen8_write(x) \ -static void \ -gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ - enum forcewake_domains fw_engine; \ - GEN6_WRITE_HEADER; \ - fw_engine = __gen8_reg_write_fw_domains(offset); \ - if (fw_engine) \ - __force_wake_auto(dev_priv, fw_engine); \ - __raw_i915_write##x(dev_priv, reg, val); \ - GEN6_WRITE_FOOTER; \ -} - -#define __fwtable_write(x) \ +#define __gen_write(func, x) \ static void \ -fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ +func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_WRITE_HEADER; \ - fw_engine = __fwtable_reg_write_fw_domains(offset); \ + fw_engine = __##func##_reg_write_fw_domains(offset); \ if (fw_engine) \ __force_wake_auto(dev_priv, fw_engine); \ __raw_i915_write##x(dev_priv, reg, val); \ 
GEN6_WRITE_FOOTER; \ } +#define __gen8_write(x) __gen_write(gen8, x) +#define __fwtable_write(x) __gen_write(fwtable, x) #define __gen9_decoupled_write(x) \ static void \ @@@ -1109,6 -1204,31 +1118,6 @@@ __gen6_write(32 #undef GEN6_WRITE_FOOTER #undef GEN6_WRITE_HEADER -#define VGPU_WRITE_HEADER \ - unsigned long irqflags; \ - trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ - assert_rpm_device_not_suspended(dev_priv); \ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) - -#define VGPU_WRITE_FOOTER \ - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags) - -#define __vgpu_write(x) \ -static void vgpu_write##x(struct drm_i915_private *dev_priv, \ - i915_reg_t reg, u##x val, bool trace) { \ - VGPU_WRITE_HEADER; \ - __raw_i915_write##x(dev_priv, reg, val); \ - VGPU_WRITE_FOOTER; \ -} - -__vgpu_write(8) -__vgpu_write(16) -__vgpu_write(32) - -#undef __vgpu_write -#undef VGPU_WRITE_FOOTER -#undef VGPU_WRITE_HEADER - #define ASSIGN_WRITE_MMIO_VFUNCS(x) \ do { \ dev_priv->uncore.funcs.mmio_writeb = x##_write8; \ @@@ -1264,11 -1384,6 +1273,11 @@@ static void intel_uncore_fw_domains_ini FORCEWAKE, FORCEWAKE_ACK); } + if (intel_vgpu_active(dev_priv)) { + dev_priv->uncore.funcs.force_wake_get = vgpu_fw_domains_nop; + dev_priv->uncore.funcs.force_wake_put = vgpu_fw_domains_nop; + } + /* All future platforms are expected to require complex power gating */ WARN_ON(dev_priv->uncore.fw_domains == 0); } @@@ -1280,6 -1395,32 +1289,32 @@@ dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \ } + static int i915_pmic_bus_access_notifier(struct notifier_block *nb, + unsigned long action, void *data) + { + struct drm_i915_private *dev_priv = container_of(nb, + struct drm_i915_private, uncore.pmic_bus_access_nb); + + switch (action) { + case MBI_PMIC_BUS_ACCESS_BEGIN: + /* + * forcewake all now to make sure that we don't need to do a + * forcewake later which on systems where this notifier gets + * called requires the punit to access to the shared pmic i2c + * bus, which will be busy after this notification, leading to: + * "render: timed out waiting for forcewake ack request." + * errors. + */ + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + break; + case MBI_PMIC_BUS_ACCESS_END: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + break; + } + + return NOTIFY_OK; + } + void intel_uncore_init(struct drm_i915_private *dev_priv) { i915_check_vgpu(dev_priv); @@@ -1289,6 -1430,8 +1324,8 @@@ __intel_uncore_early_sanitize(dev_priv, false); dev_priv->uncore.unclaimed_mmio_check = 1; + dev_priv->uncore.pmic_bus_access_nb.notifier_call = + i915_pmic_bus_access_notifier; switch (INTEL_INFO(dev_priv)->gen) { default: @@@ -1339,6 -1482,18 +1376,9 @@@ break; } - intel_fw_table_check(dev_priv); - if (INTEL_GEN(dev_priv) >= 8) - intel_shadow_table_check(); - - if (intel_vgpu_active(dev_priv)) { - ASSIGN_WRITE_MMIO_VFUNCS(vgpu); - ASSIGN_READ_MMIO_VFUNCS(vgpu); - } - + iosf_mbi_register_pmic_bus_access_notifier( + &dev_priv->uncore.pmic_bus_access_nb); + i915_check_and_clear_faults(dev_priv); } #undef ASSIGN_WRITE_MMIO_VFUNCS @@@ -1346,6 -1501,9 +1386,9 @@@ void intel_uncore_fini(struct drm_i915_private *dev_priv) { + iosf_mbi_unregister_pmic_bus_access_notifier( + &dev_priv->uncore.pmic_bus_access_nb); + /* Paranoia: make sure we have disabled everything before we exit. 
*/ intel_uncore_sanitize(dev_priv); intel_uncore_forcewake_reset(dev_priv, false); @@@ -1856,7 -2014,3 +1899,7 @@@ intel_uncore_forcewake_for_reg(struct d return fw_domains; } + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/intel_uncore.c" +#endif diff --combined drivers/i2c/busses/i2c-designware-core.c index 7a3faa5,8c3ba42..15a53481 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@@ -177,13 -177,13 +177,13 @@@ static u32 dw_readl(struct dw_i2c_dev * { u32 value; - if (dev->accessor_flags & ACCESS_16BIT) + if (dev->flags & ACCESS_16BIT) value = readw_relaxed(dev->base + offset) | (readw_relaxed(dev->base + offset + 2) << 16); else value = readl_relaxed(dev->base + offset); - if (dev->accessor_flags & ACCESS_SWAP) + if (dev->flags & ACCESS_SWAP) return swab32(value); else return value; @@@ -191,10 -191,10 +191,10 @@@ static void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset) { - if (dev->accessor_flags & ACCESS_SWAP) + if (dev->flags & ACCESS_SWAP) b = swab32(b); - if (dev->accessor_flags & ACCESS_16BIT) { + if (dev->flags & ACCESS_16BIT) { writew_relaxed((u16)b, dev->base + offset); writew_relaxed((u16)(b >> 16), dev->base + offset + 2); } else { @@@ -339,10 -339,10 +339,10 @@@ int i2c_dw_init(struct dw_i2c_dev *dev reg = dw_readl(dev, DW_IC_COMP_TYPE); if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) { /* Configure register endianess access */ - dev->accessor_flags |= ACCESS_SWAP; + dev->flags |= ACCESS_SWAP; } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) { /* Configure register access mode 16bit */ - dev->accessor_flags |= ACCESS_16BIT; + dev->flags |= ACCESS_16BIT; } else if (reg != DW_IC_COMP_TYPE_VALUE) { dev_err(dev->dev, "Unknown Synopsys component type: " "0x%08x\n", reg); @@@ -475,28 -475,30 +475,28 @@@ static int i2c_dw_wait_bus_not_busy(str static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; - u32 ic_tar = 0; + u32 ic_con, ic_tar = 0; /* Disable the adapter */ __i2c_dw_enable_and_wait(dev, false); /* if the slave address is ten bit address, enable 10BITADDR */ - if (dev->dynamic_tar_update_enabled) { + ic_con = dw_readl(dev, DW_IC_CON); + if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) { + ic_con |= DW_IC_CON_10BITADDR_MASTER; /* * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing - * mode has to be enabled via bit 12 of IC_TAR register, - * otherwise bit 4 of IC_CON is used. + * mode has to be enabled via bit 12 of IC_TAR register. + * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be + * detected from registers. */ - if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) - ic_tar = DW_IC_TAR_10BITADDR_MASTER; + ic_tar = DW_IC_TAR_10BITADDR_MASTER; } else { - u32 ic_con = dw_readl(dev, DW_IC_CON); - - if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) - ic_con |= DW_IC_CON_10BITADDR_MASTER; - else - ic_con &= ~DW_IC_CON_10BITADDR_MASTER; - dw_writel(dev, ic_con, DW_IC_CON); + ic_con &= ~DW_IC_CON_10BITADDR_MASTER; } + dw_writel(dev, ic_con, DW_IC_CON); + /* * Set the slave (target) address and enable 10-bit addressing mode * if applicable. 
@@@ -820,7 -822,7 +820,7 @@@ static u32 i2c_dw_func(struct i2c_adapt return dev->functionality; } -static struct i2c_algorithm i2c_dw_algo = { +static const struct i2c_algorithm i2c_dw_algo = { .master_xfer = i2c_dw_xfer, .functionality = i2c_dw_func, }; @@@ -924,7 -926,7 +924,7 @@@ static irqreturn_t i2c_dw_isr(int this_ tx_aborted: if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) complete(&dev->cmd_complete); - else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) { + else if (unlikely(dev->flags & ACCESS_INTR_MASK)) { /* workaround to trigger pending interrupt */ stat = dw_readl(dev, DW_IC_INTR_MASK); i2c_dw_disable_int(dev); @@@ -961,6 -963,7 +961,6 @@@ int i2c_dw_probe(struct dw_i2c_dev *dev { struct i2c_adapter *adap = &dev->adapter; int r; - u32 reg; init_completion(&dev->cmd_complete); @@@ -968,6 -971,26 +968,6 @@@ if (r) return r; - r = i2c_dw_acquire_lock(dev); - if (r) - return r; - - /* - * Test if dynamic TAR update is enabled in this controller by writing - * to IC_10BITADDR_MASTER field in IC_CON: when it is enabled this - * field is read-only so it should not succeed - */ - reg = dw_readl(dev, DW_IC_CON); - dw_writel(dev, reg ^ DW_IC_CON_10BITADDR_MASTER, DW_IC_CON); - - if ((dw_readl(dev, DW_IC_CON) & DW_IC_CON_10BITADDR_MASTER) == - (reg & DW_IC_CON_10BITADDR_MASTER)) { - dev->dynamic_tar_update_enabled = true; - dev_dbg(dev->dev, "Dynamic TAR update enabled"); - } - - i2c_dw_release_lock(dev); - snprintf(adap->name, sizeof(adap->name), "Synopsys DesignWare I2C adapter"); adap->retries = 3; diff --combined drivers/i2c/busses/i2c-designware-core.h index c1db3a5,a8e74ca..091c249 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@@ -23,6 -23,7 +23,7 @@@ */ #include + #include #define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \ I2C_FUNC_SMBUS_BYTE | \ @@@ -75,6 -76,7 +76,7 @@@ * @fp_lcnt: fast plus LCNT value * @hs_hcnt: high speed HCNT value * @hs_lcnt: high speed LCNT value + * @pm_qos: pm_qos_request used while holding a hardware lock on the bus * @acquire_lock: function to acquire a hardware lock on the bus * @release_lock: function to release a hardware lock on the bus * @pm_runtime_disabled: true if pm runtime is disabled @@@ -103,7 -105,7 +105,7 @@@ struct dw_i2c_dev unsigned int status; u32 abort_source; int irq; - u32 accessor_flags; + u32 flags; struct i2c_adapter adapter; u32 functionality; u32 master_cfg; @@@ -122,15 -124,19 +124,18 @@@ u16 fp_lcnt; u16 hs_hcnt; u16 hs_lcnt; + struct pm_qos_request pm_qos; int (*acquire_lock)(struct dw_i2c_dev *dev); void (*release_lock)(struct dw_i2c_dev *dev); bool pm_runtime_disabled; - bool dynamic_tar_update_enabled; }; #define ACCESS_SWAP 0x00000001 #define ACCESS_16BIT 0x00000002 #define ACCESS_INTR_MASK 0x00000004 + #define MODEL_CHERRYTRAIL 0x00000100 + extern int i2c_dw_init(struct dw_i2c_dev *dev); extern void i2c_dw_disable(struct dw_i2c_dev *dev); extern void i2c_dw_disable_int(struct dw_i2c_dev *dev); @@@ -138,7 -144,9 +143,9 @@@ extern u32 i2c_dw_read_comp_param(struc extern int i2c_dw_probe(struct dw_i2c_dev *dev); #if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL) - extern int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev); + extern int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev); + extern void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev); #else - static inline int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) { return 0; } + static inline int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { return 0; } + static inline void 
i2c_dw_remove_lock_support(struct dw_i2c_dev *dev) {} #endif
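
Note on how the two halves of this merge fit together: the i915 side registers a PMIC bus-access notifier (i915_pmic_bus_access_notifier above, hooked up via iosf_mbi_register_pmic_bus_access_notifier) so it can take forcewake before the P-Unit is asked to use the shared PMIC I2C bus, while the designware-baytrail side is expected to signal that chain around each PMIC transfer. The sketch below is only an illustration of that producer side, assuming the iosf_mbi notifier-chain call from Hans de Goede's series is available in this tree; example_pmic_bus_xfer() and do_pmic_transfer() are made-up names for this sketch, not functions from the patch.

#include <asm/iosf_mbi.h>

/* Illustrative sketch (not part of this patch): wrap a PMIC I2C transfer
 * with begin/end notifications so listeners such as i915 can act first.
 */
static int example_pmic_bus_xfer(struct dw_i2c_dev *dev)
{
	int ret;

	/* Listeners (e.g. i915) grab forcewake now, so the P-Unit will not
	 * need the shared PMIC bus while we are using it.
	 */
	iosf_mbi_call_pmic_bus_access_notifier_chain(MBI_PMIC_BUS_ACCESS_BEGIN,
						     NULL);

	ret = do_pmic_transfer(dev);	/* placeholder for the real transfer */

	iosf_mbi_call_pmic_bus_access_notifier_chain(MBI_PMIC_BUS_ACCESS_END,
						     NULL);
	return ret;
}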