/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
49 #include "amdgpu_pm.h"
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
59 #include "ivsrcid/ivsrcid_vislands30.h"
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
108 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
111 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
/*
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
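/*
 * A rough sketch of the initialization flow implemented further below:
 *
 *   dm_hw_init()
 *     -> amdgpu_dm_init()       creates DC via dc_create(), brings up the
 *                               DMUB service via dm_dmub_hw_init(), and
 *                               registers the DRM objects through
 *                               amdgpu_dm_initialize_drm_device()
 *     -> amdgpu_dm_hpd_init()   enables hotplug (HPD) interrupt handling
 */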
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 switch (link->dpcd_caps.dongle_type) {
137 case DISPLAY_DONGLE_NONE:
138 return DRM_MODE_SUBCONNECTOR_Native;
139 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140 return DRM_MODE_SUBCONNECTOR_VGA;
141 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142 case DISPLAY_DONGLE_DP_DVI_DONGLE:
143 return DRM_MODE_SUBCONNECTOR_DVID;
144 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146 return DRM_MODE_SUBCONNECTOR_HDMIA;
147 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149 return DRM_MODE_SUBCONNECTOR_Unknown;
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 struct dc_link *link = aconnector->dc_link;
156 struct drm_connector *connector = &aconnector->base;
157 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
162 if (aconnector->dc_sink)
163 subconnector = get_subconnector_type(link);
165 drm_object_property_set_value(&connector->base,
166 connector->dev->mode_config.dp_subconnector_property,
/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182 struct drm_plane *plane,
183 unsigned long possible_crtcs,
184 const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186 struct drm_plane *plane,
187 uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189 struct amdgpu_dm_connector *amdgpu_dm_connector,
191 struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193 struct amdgpu_encoder *aencoder,
194 uint32_t link_index);
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 struct drm_atomic_state *state);
203 static void handle_cursor_update(struct drm_plane *plane,
204 struct drm_plane_state *old_plane_state);
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
/**
 * dm_vblank_get_counter
 *
 * Get counter for number of vertical blanks
 *
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * Returns:
 * Counter for vertical blanks
 */
228 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
230 if (crtc >= adev->mode_info.num_crtc)
233 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
235 if (acrtc->dm_irq_params.stream == NULL) {
236 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
241 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246 u32 *vbl, u32 *position)
248 uint32_t v_blank_start, v_blank_end, h_position, v_position;
250 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
253 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
255 if (acrtc->dm_irq_params.stream == NULL) {
256 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
262 * TODO rework base driver to use values directly.
263 * for now parse it back into reg-format
265 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
271 *position = v_position | (h_position << 16);
272 *vbl = v_blank_start | (v_blank_end << 16);
278 static bool dm_is_idle(void *handle)
284 static int dm_wait_for_idle(void *handle)
290 static bool dm_check_soft_reset(void *handle)
295 static int dm_soft_reset(void *handle)
301 static struct amdgpu_crtc *
302 get_crtc_by_otg_inst(struct amdgpu_device *adev,
305 struct drm_device *dev = adev_to_drm(adev);
306 struct drm_crtc *crtc;
307 struct amdgpu_crtc *amdgpu_crtc;
309 if (otg_inst == -1) {
311 return adev->mode_info.crtcs[0];
314 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
315 amdgpu_crtc = to_amdgpu_crtc(crtc);
317 if (amdgpu_crtc->otg_inst == otg_inst)
324 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
326 return acrtc->dm_irq_params.freesync_config.state ==
327 VRR_STATE_ACTIVE_VARIABLE ||
328 acrtc->dm_irq_params.freesync_config.state ==
329 VRR_STATE_ACTIVE_FIXED;
332 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
334 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
335 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
339 * dm_pflip_high_irq() - Handle pageflip interrupt
340 * @interrupt_params: ignored
342 * Handles the pageflip interrupt by notifying all interested parties
343 * that the pageflip has been completed.
345 static void dm_pflip_high_irq(void *interrupt_params)
347 struct amdgpu_crtc *amdgpu_crtc;
348 struct common_irq_params *irq_params = interrupt_params;
349 struct amdgpu_device *adev = irq_params->adev;
351 struct drm_pending_vblank_event *e;
352 uint32_t vpos, hpos, v_blank_start, v_blank_end;
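/*
 * Pageflip IRQ sources are laid out per OTG instance, so subtracting the
 * base IRQ_TYPE_PFLIP source from the incoming source recovers the OTG
 * instance (and therefore the CRTC) that raised the interrupt.
 */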
355 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
357 /* IRQ could occur when in initial stage */
358 /* TODO work and BO cleanup */
359 if (amdgpu_crtc == NULL) {
360 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
364 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
366 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
367 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
368 amdgpu_crtc->pflip_status,
369 AMDGPU_FLIP_SUBMITTED,
370 amdgpu_crtc->crtc_id,
372 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
376 /* page flip completed. */
377 e = amdgpu_crtc->event;
378 amdgpu_crtc->event = NULL;
383 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
385 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
387 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
388 &v_blank_end, &hpos, &vpos) ||
389 (vpos < v_blank_start)) {
390 /* Update to correct count and vblank timestamp if racing with
391 * vblank irq. This also updates to the correct vblank timestamp
392 * even in VRR mode, as scanout is past the front-porch atm.
394 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
396 /* Wake up userspace by sending the pageflip event with proper
397 * count and timestamp of vblank of flip completion.
400 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
402 /* Event sent, so done with vblank for this flip */
403 drm_crtc_vblank_put(&amdgpu_crtc->base);
406 /* VRR active and inside front-porch: vblank count and
407 * timestamp for pageflip event will only be up to date after
408 * drm_crtc_handle_vblank() has been executed from late vblank
409 * irq handler after start of back-porch (vline 0). We queue the
410 * pageflip event for send-out by drm_crtc_handle_vblank() with
411 * updated timestamp and count, once it runs after us.
413 * We need to open-code this instead of using the helper
414 * drm_crtc_arm_vblank_event(), as that helper would
415 * call drm_crtc_accurate_vblank_count(), which we must
416 * not call in VRR mode while we are in front-porch!
419 /* sequence will be replaced by real count during send-out. */
420 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
421 e->pipe = amdgpu_crtc->crtc_id;
423 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
427 /* Keep track of vblank of this flip for flip throttling. We use the
428 * cooked hw counter, as that one incremented at start of this vblank
429 * of pageflip completion, so last_flip_vblank is the forbidden count
430 * for queueing new pageflips if vsync + VRR is enabled.
432 amdgpu_crtc->dm_irq_params.last_flip_vblank =
433 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
435 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
436 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
438 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
439 amdgpu_crtc->crtc_id, amdgpu_crtc,
440 vrr_active, (int) !e);
443 static void dm_vupdate_high_irq(void *interrupt_params)
445 struct common_irq_params *irq_params = interrupt_params;
446 struct amdgpu_device *adev = irq_params->adev;
447 struct amdgpu_crtc *acrtc;
451 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
454 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
456 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
460 /* Core vblank handling is done here after end of front-porch in
461 * vrr mode, as vblank timestamping will give valid results
462 * while now done after front-porch. This will also deliver
463 * page-flip completion events that have been queued to us
464 * if a pageflip happened inside front-porch.
467 drm_crtc_handle_vblank(&acrtc->base);
469 /* BTR processing for pre-DCE12 ASICs */
470 if (acrtc->dm_irq_params.stream &&
471 adev->family < AMDGPU_FAMILY_AI) {
472 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
473 mod_freesync_handle_v_update(
474 adev->dm.freesync_module,
475 acrtc->dm_irq_params.stream,
476 &acrtc->dm_irq_params.vrr_params);
478 dc_stream_adjust_vmin_vmax(
480 acrtc->dm_irq_params.stream,
481 &acrtc->dm_irq_params.vrr_params.adjust);
482 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
495 static void dm_crtc_high_irq(void *interrupt_params)
497 struct common_irq_params *irq_params = interrupt_params;
498 struct amdgpu_device *adev = irq_params->adev;
499 struct amdgpu_crtc *acrtc;
503 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
507 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
509 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
510 vrr_active, acrtc->dm_irq_params.active_planes);
513 * Core vblank handling at start of front-porch is only possible
514 * in non-vrr mode, as only there vblank timestamping will give
515 * valid results while done in front-porch. Otherwise defer it
516 * to dm_vupdate_high_irq after end of front-porch.
519 drm_crtc_handle_vblank(&acrtc->base);
522 * Following stuff must happen at start of vblank, for crc
523 * computation and below-the-range btr support in vrr mode.
525 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
527 /* BTR updates need to happen before VUPDATE on Vega and above. */
528 if (adev->family < AMDGPU_FAMILY_AI)
531 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
533 if (acrtc->dm_irq_params.stream &&
534 acrtc->dm_irq_params.vrr_params.supported &&
535 acrtc->dm_irq_params.freesync_config.state ==
536 VRR_STATE_ACTIVE_VARIABLE) {
537 mod_freesync_handle_v_update(adev->dm.freesync_module,
538 acrtc->dm_irq_params.stream,
539 &acrtc->dm_irq_params.vrr_params);
541 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
542 &acrtc->dm_irq_params.vrr_params.adjust);
546 * If there aren't any active_planes then DCH HUBP may be clock-gated.
547 * In that case, pageflip completion interrupts won't fire and pageflip
548 * completion events won't get delivered. Prevent this by sending
549 * pending pageflip events from here if a flip is still pending.
551 * If any planes are enabled, use dm_pflip_high_irq() instead, to
552 * avoid race conditions between flip programming and completion,
553 * which could cause too early flip completion events.
555 if (adev->family >= AMDGPU_FAMILY_RV &&
556 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
557 acrtc->dm_irq_params.active_planes == 0) {
559 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
561 drm_crtc_vblank_put(&acrtc->base);
563 acrtc->pflip_status = AMDGPU_FLIP_NONE;
566 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
569 static int dm_set_clockgating_state(void *handle,
570 enum amd_clockgating_state state)
575 static int dm_set_powergating_state(void *handle,
576 enum amd_powergating_state state)
581 /* Prototypes of private functions */
582 static int dm_early_init(void* handle);
584 /* Allocate memory for FBC compressed data */
585 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
587 struct drm_device *dev = connector->dev;
588 struct amdgpu_device *adev = drm_to_adev(dev);
589 struct dm_compressor_info *compressor = &adev->dm.compressor;
590 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
591 struct drm_display_mode *mode;
592 unsigned long max_size = 0;
594 if (adev->dm.dc->fbc_compressor == NULL)
597 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
600 if (compressor->bo_ptr)
604 list_for_each_entry(mode, &connector->modes, head) {
605 if (max_size < mode->htotal * mode->vtotal)
606 max_size = mode->htotal * mode->vtotal;
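/* Size the compressor buffer for the largest mode in the list, assuming a
 * worst case of 4 bytes per pixel (hence the max_size * 4 allocation below).
 */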
610 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
611 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
612 &compressor->gpu_addr, &compressor->cpu_addr);
615 DRM_ERROR("DM: Failed to initialize FBC\n");
617 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
618 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
625 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
626 int pipe, bool *enabled,
627 unsigned char *buf, int max_bytes)
629 struct drm_device *dev = dev_get_drvdata(kdev);
630 struct amdgpu_device *adev = drm_to_adev(dev);
631 struct drm_connector *connector;
632 struct drm_connector_list_iter conn_iter;
633 struct amdgpu_dm_connector *aconnector;
638 mutex_lock(&adev->dm.audio_lock);
640 drm_connector_list_iter_begin(dev, &conn_iter);
641 drm_for_each_connector_iter(connector, &conn_iter) {
642 aconnector = to_amdgpu_dm_connector(connector);
643 if (aconnector->audio_inst != port)
647 ret = drm_eld_size(connector->eld);
648 memcpy(buf, connector->eld, min(max_bytes, ret));
652 drm_connector_list_iter_end(&conn_iter);
654 mutex_unlock(&adev->dm.audio_lock);
656 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
661 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
662 .get_eld = amdgpu_dm_audio_component_get_eld,
665 static int amdgpu_dm_audio_component_bind(struct device *kdev,
666 struct device *hda_kdev, void *data)
668 struct drm_device *dev = dev_get_drvdata(kdev);
669 struct amdgpu_device *adev = drm_to_adev(dev);
670 struct drm_audio_component *acomp = data;
672 acomp->ops = &amdgpu_dm_audio_component_ops;
674 adev->dm.audio_component = acomp;
679 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
680 struct device *hda_kdev, void *data)
682 struct drm_device *dev = dev_get_drvdata(kdev);
683 struct amdgpu_device *adev = drm_to_adev(dev);
684 struct drm_audio_component *acomp = data;
688 adev->dm.audio_component = NULL;
691 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
692 .bind = amdgpu_dm_audio_component_bind,
693 .unbind = amdgpu_dm_audio_component_unbind,
696 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
703 adev->mode_info.audio.enabled = true;
705 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
707 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
708 adev->mode_info.audio.pin[i].channels = -1;
709 adev->mode_info.audio.pin[i].rate = -1;
710 adev->mode_info.audio.pin[i].bits_per_sample = -1;
711 adev->mode_info.audio.pin[i].status_bits = 0;
712 adev->mode_info.audio.pin[i].category_code = 0;
713 adev->mode_info.audio.pin[i].connected = false;
714 adev->mode_info.audio.pin[i].id =
715 adev->dm.dc->res_pool->audios[i]->inst;
716 adev->mode_info.audio.pin[i].offset = 0;
719 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
723 adev->dm.audio_registered = true;
728 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
733 if (!adev->mode_info.audio.enabled)
736 if (adev->dm.audio_registered) {
737 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
738 adev->dm.audio_registered = false;
741 /* TODO: Disable audio? */
743 adev->mode_info.audio.enabled = false;
746 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
748 struct drm_audio_component *acomp = adev->dm.audio_component;
750 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
751 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
753 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
758 static int dm_dmub_hw_init(struct amdgpu_device *adev)
760 const struct dmcub_firmware_header_v1_0 *hdr;
761 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
762 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
763 const struct firmware *dmub_fw = adev->dm.dmub_fw;
764 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
765 struct abm *abm = adev->dm.dc->res_pool->abm;
766 struct dmub_srv_hw_params hw_params;
767 enum dmub_status status;
768 const unsigned char *fw_inst_const, *fw_bss_data;
769 uint32_t i, fw_inst_const_size, fw_bss_data_size;
773 /* DMUB isn't supported on the ASIC. */
777 DRM_ERROR("No framebuffer info for DMUB service.\n");
782 /* Firmware required for DMUB support. */
783 DRM_ERROR("No firmware provided for DMUB.\n");
787 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
788 if (status != DMUB_STATUS_OK) {
789 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
793 if (!has_hw_support) {
794 DRM_INFO("DMUB unsupported on ASIC\n");
798 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
800 fw_inst_const = dmub_fw->data +
801 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
804 fw_bss_data = dmub_fw->data +
805 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 le32_to_cpu(hdr->inst_const_bytes);
808 /* Copy firmware and bios info into FB memory. */
809 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
810 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
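/*
 * hdr->inst_const_bytes covers the PSP-signed image, so the PSP header and
 * footer sizes are subtracted to get the raw instruction-constant payload
 * that is copied into the DMUB_WINDOW_0_INST_CONST region on the backdoor
 * load path.
 */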
812 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
814 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
815 * amdgpu_ucode_init_single_fw will load dmub firmware
816 * fw_inst_const part to cw0; otherwise, the firmware back door load
817 * will be done by dm_dmub_hw_init
819 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
820 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
824 if (fw_bss_data_size)
825 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
826 fw_bss_data, fw_bss_data_size);
828 /* Copy firmware bios info into FB memory. */
829 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
832 /* Reset regions that need to be reset. */
833 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
834 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
836 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
837 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
839 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
840 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
842 /* Initialize hardware. */
843 memset(&hw_params, 0, sizeof(hw_params));
844 hw_params.fb_base = adev->gmc.fb_start;
845 hw_params.fb_offset = adev->gmc.aper_base;
847 /* backdoor load firmware and trigger dmub running */
848 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
849 hw_params.load_inst_const = true;
852 hw_params.psp_version = dmcu->psp_version;
854 for (i = 0; i < fb_info->num_fb; ++i)
855 hw_params.fb[i] = &fb_info->fb[i];
857 status = dmub_srv_hw_init(dmub_srv, &hw_params);
858 if (status != DMUB_STATUS_OK) {
859 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
863 /* Wait for firmware load to finish. */
864 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
865 if (status != DMUB_STATUS_OK)
866 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
868 /* Init DMCU and ABM if available. */
870 dmcu->funcs->dmcu_init(dmcu);
871 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
874 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
875 if (!adev->dm.dc->ctx->dmub_srv) {
876 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
880 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
881 adev->dm.dmcub_fw_version);
886 #if defined(CONFIG_DRM_AMD_DC_DCN)
887 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
890 uint32_t logical_addr_low;
891 uint32_t logical_addr_high;
892 uint32_t agp_base, agp_bot, agp_top;
893 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
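/*
 * The shifts below keep addresses in the granularities the aperture and page
 * table registers use: 256KB units (1 << 18) for the system aperture, 16MB
 * units (1 << 24) for the AGP aperture, and 4KB pages (1 << 12) for the GART
 * page table. The pa_config fields are converted back to byte addresses
 * further down.
 */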
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

if (adev->apu_flags & AMD_APU_IS_RAVEN2)
	/*
	 * Raven2 has a HW issue that prevents it from using VRAM that lies
	 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The workaround is to raise
	 * the system aperture high address by 1 to avoid the VM fault and
	 * hardware hang.
	 */
	logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
else
	logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
910 agp_bot = adev->gmc.agp_start >> 24;
911 agp_top = adev->gmc.agp_end >> 24;
914 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
915 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
916 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
917 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
918 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
919 page_table_base.low_part = lower_32_bits(pt_base);
921 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
922 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
924 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
925 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
926 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
928 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
929 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
930 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
932 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
933 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
934 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
936 pa_config->is_hvm_enabled = 0;
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
941 static void event_mall_stutter(struct work_struct *work)
944 struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
945 struct amdgpu_display_manager *dm = vblank_work->dm;
947 mutex_lock(&dm->dc_lock);
949 if (vblank_work->enable)
950 dm->active_vblank_irq_count++;
952 dm->active_vblank_irq_count--;
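/*
 * Idle (MALL/stutter) optimizations are only allowed once no CRTC has
 * vblank interrupts enabled, i.e. when active_vblank_irq_count drops back
 * to zero.
 */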
955 dc_allow_idle_optimizations(
956 dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
958 DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
961 mutex_unlock(&dm->dc_lock);
964 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
967 int max_caps = dc->caps.max_links;
968 struct vblank_workqueue *vblank_work;
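/*
 * One work item is allocated per link (dc->caps.max_links), presumably so
 * each CRTC can schedule its own stutter/MALL update independently.
 */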
971 vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
972 if (ZERO_OR_NULL_PTR(vblank_work)) {
977 for (i = 0; i < max_caps; i++)
978 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
983 static int amdgpu_dm_init(struct amdgpu_device *adev)
985 struct dc_init_data init_data;
986 #ifdef CONFIG_DRM_AMD_DC_HDCP
987 struct dc_callback_init init_params;
991 adev->dm.ddev = adev_to_drm(adev);
992 adev->dm.adev = adev;
994 /* Zero all the fields */
995 memset(&init_data, 0, sizeof(init_data));
996 #ifdef CONFIG_DRM_AMD_DC_HDCP
997 memset(&init_params, 0, sizeof(init_params));
1000 mutex_init(&adev->dm.dc_lock);
1001 mutex_init(&adev->dm.audio_lock);
1002 #if defined(CONFIG_DRM_AMD_DC_DCN)
1003 spin_lock_init(&adev->dm.vblank_lock);
1006 if(amdgpu_dm_irq_init(adev)) {
1007 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1011 init_data.asic_id.chip_family = adev->family;
1013 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1014 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1016 init_data.asic_id.vram_width = adev->gmc.vram_width;
1017 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1018 init_data.asic_id.atombios_base_address =
1019 adev->mode_info.atom_context->bios;
1021 init_data.driver = adev;
1023 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1025 if (!adev->dm.cgs_device) {
1026 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1030 init_data.cgs_device = adev->dm.cgs_device;
1032 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1034 switch (adev->asic_type) {
1039 init_data.flags.gpu_vm_support = true;
1040 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1041 init_data.flags.disable_dmcu = true;
1043 #if defined(CONFIG_DRM_AMD_DC_DCN)
1045 init_data.flags.gpu_vm_support = true;
1052 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1053 init_data.flags.fbc_support = true;
1055 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1056 init_data.flags.multi_mon_pp_mclk_switch = true;
1058 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1059 init_data.flags.disable_fractional_pwm = true;
1061 init_data.flags.power_down_display_on_boot = true;
1063 /* Display Core create. */
1064 adev->dm.dc = dc_create(&init_data);
1067 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1069 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1073 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1074 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1075 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1078 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1079 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1081 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1082 adev->dm.dc->debug.disable_stutter = true;
1084 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1085 adev->dm.dc->debug.disable_dsc = true;
1087 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1088 adev->dm.dc->debug.disable_clock_gate = true;
1090 r = dm_dmub_hw_init(adev);
1092 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1096 dc_hardware_init(adev->dm.dc);
1098 #if defined(CONFIG_DRM_AMD_DC_DCN)
1099 if (adev->apu_flags) {
1100 struct dc_phy_addr_space_config pa_config;
1102 mmhub_read_system_context(adev, &pa_config);
1104 // Call the DC init_memory func
1105 dc_setup_system_context(adev->dm.dc, &pa_config);
1109 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1110 if (!adev->dm.freesync_module) {
1112 "amdgpu: failed to initialize freesync_module.\n");
1114 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1115 adev->dm.freesync_module);
1117 amdgpu_dm_init_color_mod();
1119 #if defined(CONFIG_DRM_AMD_DC_DCN)
1120 if (adev->dm.dc->caps.max_links > 0) {
1121 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1123 if (!adev->dm.vblank_workqueue)
1124 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1126 DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1130 #ifdef CONFIG_DRM_AMD_DC_HDCP
1131 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1132 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1134 if (!adev->dm.hdcp_workqueue)
1135 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1137 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1139 dc_init_callbacks(adev->dm.dc, &init_params);
1142 if (amdgpu_dm_initialize_drm_device(adev)) {
1144 "amdgpu: failed to initialize sw for display support.\n");
1148 /* create fake encoders for MST */
1149 dm_dp_create_fake_mst_encoders(adev);
1151 /* TODO: Add_display_info? */
1153 /* TODO use dynamic cursor width */
1154 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1155 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1157 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1159 "amdgpu: failed to initialize sw for display support.\n");
1164 DRM_DEBUG_DRIVER("KMS initialized.\n");
1168 amdgpu_dm_fini(adev);
1173 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1177 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1178 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1181 amdgpu_dm_audio_fini(adev);
1183 amdgpu_dm_destroy_drm_device(&adev->dm);
1185 #ifdef CONFIG_DRM_AMD_DC_HDCP
1186 if (adev->dm.hdcp_workqueue) {
1187 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1188 adev->dm.hdcp_workqueue = NULL;
1192 dc_deinit_callbacks(adev->dm.dc);
1194 if (adev->dm.dc->ctx->dmub_srv) {
1195 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1196 adev->dm.dc->ctx->dmub_srv = NULL;
1199 if (adev->dm.dmub_bo)
1200 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1201 &adev->dm.dmub_bo_gpu_addr,
1202 &adev->dm.dmub_bo_cpu_addr);
1204 /* DC Destroy TODO: Replace destroy DAL */
1206 dc_destroy(&adev->dm.dc);
1208 * TODO: pageflip, vlank interrupt
1210 * amdgpu_dm_irq_fini(adev);
1213 if (adev->dm.cgs_device) {
1214 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1215 adev->dm.cgs_device = NULL;
1217 if (adev->dm.freesync_module) {
1218 mod_freesync_destroy(adev->dm.freesync_module);
1219 adev->dm.freesync_module = NULL;
1222 mutex_destroy(&adev->dm.audio_lock);
1223 mutex_destroy(&adev->dm.dc_lock);
1228 static int load_dmcu_fw(struct amdgpu_device *adev)
1230 const char *fw_name_dmcu = NULL;
1232 const struct dmcu_firmware_header_v1_0 *hdr;
1234 switch(adev->asic_type) {
1235 #if defined(CONFIG_DRM_AMD_DC_SI)
1250 case CHIP_POLARIS11:
1251 case CHIP_POLARIS10:
1252 case CHIP_POLARIS12:
1260 case CHIP_SIENNA_CICHLID:
1261 case CHIP_NAVY_FLOUNDER:
1262 case CHIP_DIMGREY_CAVEFISH:
1266 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1269 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1270 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1271 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1272 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1277 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1281 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1282 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1286 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1288 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1289 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1290 adev->dm.fw_dmcu = NULL;
1294 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1299 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1301 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1303 release_firmware(adev->dm.fw_dmcu);
1304 adev->dm.fw_dmcu = NULL;
1308 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1309 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1310 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1311 adev->firmware.fw_size +=
1312 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1314 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1315 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1316 adev->firmware.fw_size +=
1317 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1319 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1321 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1326 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1328 struct amdgpu_device *adev = ctx;
1330 return dm_read_reg(adev->dm.dc->ctx, address);
1333 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1336 struct amdgpu_device *adev = ctx;
1338 return dm_write_reg(adev->dm.dc->ctx, address, value);
1341 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1343 struct dmub_srv_create_params create_params;
1344 struct dmub_srv_region_params region_params;
1345 struct dmub_srv_region_info region_info;
1346 struct dmub_srv_fb_params fb_params;
1347 struct dmub_srv_fb_info *fb_info;
1348 struct dmub_srv *dmub_srv;
1349 const struct dmcub_firmware_header_v1_0 *hdr;
1350 const char *fw_name_dmub;
1351 enum dmub_asic dmub_asic;
1352 enum dmub_status status;
1355 switch (adev->asic_type) {
1357 dmub_asic = DMUB_ASIC_DCN21;
1358 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1359 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1360 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1362 case CHIP_SIENNA_CICHLID:
1363 dmub_asic = DMUB_ASIC_DCN30;
1364 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1366 case CHIP_NAVY_FLOUNDER:
1367 dmub_asic = DMUB_ASIC_DCN30;
1368 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1371 dmub_asic = DMUB_ASIC_DCN301;
1372 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1374 case CHIP_DIMGREY_CAVEFISH:
1375 dmub_asic = DMUB_ASIC_DCN302;
1376 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1380 /* ASIC doesn't support DMUB. */
1384 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1386 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1390 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1392 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1396 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1398 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1399 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1400 AMDGPU_UCODE_ID_DMCUB;
1401 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1403 adev->firmware.fw_size +=
1404 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1406 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1407 adev->dm.dmcub_fw_version);
1410 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1412 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1413 dmub_srv = adev->dm.dmub_srv;
1416 DRM_ERROR("Failed to allocate DMUB service!\n");
1420 memset(&create_params, 0, sizeof(create_params));
1421 create_params.user_ctx = adev;
1422 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1423 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1424 create_params.asic = dmub_asic;
1426 /* Create the DMUB service. */
1427 status = dmub_srv_create(dmub_srv, &create_params);
1428 if (status != DMUB_STATUS_OK) {
1429 DRM_ERROR("Error creating DMUB service: %d\n", status);
1433 /* Calculate the size of all the regions for the DMUB service. */
1434 memset(®ion_params, 0, sizeof(region_params));
1436 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1437 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1438 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1439 region_params.vbios_size = adev->bios_size;
1440 region_params.fw_bss_data = region_params.bss_data_size ?
1441 adev->dm.dmub_fw->data +
1442 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1443 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1444 region_params.fw_inst_const =
1445 adev->dm.dmub_fw->data +
1446 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1449 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params,
1452 if (status != DMUB_STATUS_OK) {
1453 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1458 * Allocate a framebuffer based on the total size of all the regions.
1459 * TODO: Move this into GART.
1461 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1462 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1463 &adev->dm.dmub_bo_gpu_addr,
1464 &adev->dm.dmub_bo_cpu_addr);
1468 /* Rebase the regions on the framebuffer address. */
1469 memset(&fb_params, 0, sizeof(fb_params));
1470 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1471 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1472 fb_params.region_info = ®ion_info;
1474 adev->dm.dmub_fb_info =
1475 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1476 fb_info = adev->dm.dmub_fb_info;
1480 "Failed to allocate framebuffer info for DMUB service!\n");
1484 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1485 if (status != DMUB_STATUS_OK) {
1486 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1493 static int dm_sw_init(void *handle)
1495 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1498 r = dm_dmub_sw_init(adev);
1502 return load_dmcu_fw(adev);
1505 static int dm_sw_fini(void *handle)
1507 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1509 kfree(adev->dm.dmub_fb_info);
1510 adev->dm.dmub_fb_info = NULL;
1512 if (adev->dm.dmub_srv) {
1513 dmub_srv_destroy(adev->dm.dmub_srv);
1514 adev->dm.dmub_srv = NULL;
1517 release_firmware(adev->dm.dmub_fw);
1518 adev->dm.dmub_fw = NULL;
1520 release_firmware(adev->dm.fw_dmcu);
1521 adev->dm.fw_dmcu = NULL;
1526 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1528 struct amdgpu_dm_connector *aconnector;
1529 struct drm_connector *connector;
1530 struct drm_connector_list_iter iter;
1533 drm_connector_list_iter_begin(dev, &iter);
1534 drm_for_each_connector_iter(connector, &iter) {
1535 aconnector = to_amdgpu_dm_connector(connector);
1536 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1537 aconnector->mst_mgr.aux) {
1538 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1540 aconnector->base.base.id);
1542 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1544 DRM_ERROR("DM_MST: Failed to start MST\n");
1545 aconnector->dc_link->type =
1546 dc_connection_single;
1551 drm_connector_list_iter_end(&iter);
1556 static int dm_late_init(void *handle)
1558 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1560 struct dmcu_iram_parameters params;
1561 unsigned int linear_lut[16];
1563 struct dmcu *dmcu = NULL;
1566 dmcu = adev->dm.dc->res_pool->dmcu;
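/* Build a 16-entry linear (identity) ramp from 0 to 0xFFFF; it is used below
 * as the default backlight LUT handed to ABM.
 */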
1568 for (i = 0; i < 16; i++)
1569 linear_lut[i] = 0xFFFF * i / 15;
1572 params.backlight_ramping_start = 0xCCCC;
1573 params.backlight_ramping_reduction = 0xCCCCCCCC;
1574 params.backlight_lut_array_size = 16;
1575 params.backlight_lut_array = linear_lut;
/* Min backlight level after ABM reduction; don't allow below 1%:
 * 0xFFFF * 0.01 = 0x28F
 */
params.min_abm_backlight = 0x28F;

/* In the case where ABM is implemented on DMCUB, the dmcu object will be
 * NULL. ABM 2.4 and up are implemented on DMCUB.
 */
1587 ret = dmcu_load_iram(dmcu, params);
1588 else if (adev->dm.dc->ctx->dmub_srv)
1589 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1594 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1597 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1599 struct amdgpu_dm_connector *aconnector;
1600 struct drm_connector *connector;
1601 struct drm_connector_list_iter iter;
1602 struct drm_dp_mst_topology_mgr *mgr;
1604 bool need_hotplug = false;
1606 drm_connector_list_iter_begin(dev, &iter);
1607 drm_for_each_connector_iter(connector, &iter) {
1608 aconnector = to_amdgpu_dm_connector(connector);
1609 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1610 aconnector->mst_port)
1613 mgr = &aconnector->mst_mgr;
1616 drm_dp_mst_topology_mgr_suspend(mgr);
1618 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1620 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1621 need_hotplug = true;
1625 drm_connector_list_iter_end(&iter);
1628 drm_kms_helper_hotplug_event(dev);
1631 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1633 struct smu_context *smu = &adev->smu;
1636 if (!is_support_sw_smu(adev))
/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
 * on the Windows driver's dc implementation.
 *
 * For Navi1x, clock settings of dcn watermarks are fixed; the settings
 * should be passed to smu during boot up and on resume from s3.
 * boot up: dc calculates dcn watermark clock settings within dc_create,
 * dcn20_resource_construct
 * then calls the pplib functions below to pass the settings to smu:
 * smu_set_watermarks_for_clock_ranges
 * smu_set_watermarks_table
 * navi10_set_watermarks_table
 * smu_write_watermarks_table
 *
 * For Renoir, clock settings of dcn watermarks are also fixed values.
 * dc has implemented a different flow for the Windows driver:
 * dc_hardware_init / dc_set_power_state
 * smu_set_watermarks_for_clock_ranges
 * renoir_set_watermarks_table
 * smu_write_watermarks_table
 *
 * On Linux,
 * dc_hardware_init -> amdgpu_dm_init
 * dc_set_power_state --> dm_resume
 *
 * therefore, this function applies to navi10/12/14 but not Renoir
 */
1669 switch(adev->asic_type) {
1678 ret = smu_write_watermarks_table(smu);
1680 DRM_ERROR("Failed to update WMTABLE!\n");
1688 * dm_hw_init() - Initialize DC device
1689 * @handle: The base driver device containing the amdgpu_dm device.
1691 * Initialize the &struct amdgpu_display_manager device. This involves calling
1692 * the initializers of each DM component, then populating the struct with them.
1694 * Although the function implies hardware initialization, both hardware and
1695 * software are initialized here. Splitting them out to their relevant init
1696 * hooks is a future TODO item.
1698 * Some notable things that are initialized here:
1700 * - Display Core, both software and hardware
1701 * - DC modules that we need (freesync and color management)
1702 * - DRM software states
1703 * - Interrupt sources and handlers
1705 * - Debug FS entries, if enabled
1707 static int dm_hw_init(void *handle)
1709 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1710 /* Create DAL display manager */
1711 amdgpu_dm_init(adev);
1712 amdgpu_dm_hpd_init(adev);
1718 * dm_hw_fini() - Teardown DC device
1719 * @handle: The base driver device containing the amdgpu_dm device.
1721 * Teardown components within &struct amdgpu_display_manager that require
1722 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1723 * were loaded. Also flush IRQ workqueues and disable them.
1725 static int dm_hw_fini(void *handle)
1727 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1729 amdgpu_dm_hpd_fini(adev);
1731 amdgpu_dm_irq_fini(adev);
1732 amdgpu_dm_fini(adev);
1737 static int dm_enable_vblank(struct drm_crtc *crtc);
1738 static void dm_disable_vblank(struct drm_crtc *crtc);
1740 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1741 struct dc_state *state, bool enable)
1743 enum dc_irq_source irq_source;
1744 struct amdgpu_crtc *acrtc;
1748 for (i = 0; i < state->stream_count; i++) {
1749 acrtc = get_crtc_by_otg_inst(
1750 adev, state->stream_status[i].primary_otg_inst);
1752 if (acrtc && state->stream_status[i].plane_count != 0) {
1753 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1754 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1755 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1756 acrtc->crtc_id, enable ? "en" : "dis", rc);
1758 DRM_WARN("Failed to %s pflip interrupts\n",
1759 enable ? "enable" : "disable");
1762 rc = dm_enable_vblank(&acrtc->base);
1764 DRM_WARN("Failed to enable vblank interrupts\n");
1766 dm_disable_vblank(&acrtc->base);
1774 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1776 struct dc_state *context = NULL;
1777 enum dc_status res = DC_ERROR_UNEXPECTED;
1779 struct dc_stream_state *del_streams[MAX_PIPES];
1780 int del_streams_count = 0;
1782 memset(del_streams, 0, sizeof(del_streams));
1784 context = dc_create_state(dc);
1785 if (context == NULL)
1786 goto context_alloc_fail;
1788 dc_resource_state_copy_construct_current(dc, context);
1790 /* First remove from context all streams */
1791 for (i = 0; i < context->stream_count; i++) {
1792 struct dc_stream_state *stream = context->streams[i];
1794 del_streams[del_streams_count++] = stream;
1797 /* Remove all planes for removed streams and then remove the streams */
1798 for (i = 0; i < del_streams_count; i++) {
1799 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1800 res = DC_FAIL_DETACH_SURFACES;
1804 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1810 res = dc_validate_global_state(dc, context, false);
1813 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1817 res = dc_commit_state(dc, context);
1820 dc_release_state(context);
1826 static int dm_suspend(void *handle)
1828 struct amdgpu_device *adev = handle;
1829 struct amdgpu_display_manager *dm = &adev->dm;
1832 if (amdgpu_in_reset(adev)) {
1833 mutex_lock(&dm->dc_lock);
1835 #if defined(CONFIG_DRM_AMD_DC_DCN)
1836 dc_allow_idle_optimizations(adev->dm.dc, false);
1839 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1841 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1843 amdgpu_dm_commit_zero_streams(dm->dc);
1845 amdgpu_dm_irq_suspend(adev);
1850 WARN_ON(adev->dm.cached_state);
1851 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1853 s3_handle_mst(adev_to_drm(adev), true);
1855 amdgpu_dm_irq_suspend(adev);
1858 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1863 static struct amdgpu_dm_connector *
1864 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1865 struct drm_crtc *crtc)
1868 struct drm_connector_state *new_con_state;
1869 struct drm_connector *connector;
1870 struct drm_crtc *crtc_from_state;
1872 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1873 crtc_from_state = new_con_state->crtc;
1875 if (crtc_from_state == crtc)
1876 return to_amdgpu_dm_connector(connector);
1882 static void emulated_link_detect(struct dc_link *link)
1884 struct dc_sink_init_data sink_init_data = { 0 };
1885 struct display_sink_capability sink_caps = { 0 };
1886 enum dc_edid_status edid_status;
1887 struct dc_context *dc_ctx = link->ctx;
1888 struct dc_sink *sink = NULL;
1889 struct dc_sink *prev_sink = NULL;
1891 link->type = dc_connection_none;
1892 prev_sink = link->local_sink;
1895 dc_sink_release(prev_sink);
1897 switch (link->connector_signal) {
1898 case SIGNAL_TYPE_HDMI_TYPE_A: {
1899 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1900 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1904 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1905 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1906 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1910 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1911 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1912 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1916 case SIGNAL_TYPE_LVDS: {
1917 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1918 sink_caps.signal = SIGNAL_TYPE_LVDS;
1922 case SIGNAL_TYPE_EDP: {
1923 sink_caps.transaction_type =
1924 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1925 sink_caps.signal = SIGNAL_TYPE_EDP;
1929 case SIGNAL_TYPE_DISPLAY_PORT: {
1930 sink_caps.transaction_type =
1931 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1932 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1937 DC_ERROR("Invalid connector type! signal:%d\n",
1938 link->connector_signal);
1942 sink_init_data.link = link;
1943 sink_init_data.sink_signal = sink_caps.signal;
1945 sink = dc_sink_create(&sink_init_data);
1947 DC_ERROR("Failed to create sink!\n");
1951 /* dc_sink_create returns a new reference */
1952 link->local_sink = sink;
1954 edid_status = dm_helpers_read_local_edid(
1959 if (edid_status != EDID_OK)
1960 DC_ERROR("Failed to read EDID");
1964 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1965 struct amdgpu_display_manager *dm)
1968 struct dc_surface_update surface_updates[MAX_SURFACES];
1969 struct dc_plane_info plane_infos[MAX_SURFACES];
1970 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1971 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1972 struct dc_stream_update stream_update;
1976 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1979 dm_error("Failed to allocate update bundle\n");
1983 for (k = 0; k < dc_state->stream_count; k++) {
1984 bundle->stream_update.stream = dc_state->streams[k];
1986 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1987 bundle->surface_updates[m].surface =
1988 dc_state->stream_status->plane_states[m];
1989 bundle->surface_updates[m].surface->force_full_update =
1992 dc_commit_updates_for_stream(
1993 dm->dc, bundle->surface_updates,
1994 dc_state->stream_status->plane_count,
1995 dc_state->streams[k], &bundle->stream_update, dc_state);
2004 static void dm_set_dpms_off(struct dc_link *link)
2006 struct dc_stream_state *stream_state;
2007 struct amdgpu_dm_connector *aconnector = link->priv;
2008 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2009 struct dc_stream_update stream_update;
2010 bool dpms_off = true;
2012 memset(&stream_update, 0, sizeof(stream_update));
2013 stream_update.dpms_off = &dpms_off;
2015 mutex_lock(&adev->dm.dc_lock);
2016 stream_state = dc_stream_find_from_link(link);
2018 if (stream_state == NULL) {
2019 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2020 mutex_unlock(&adev->dm.dc_lock);
2024 stream_update.stream = stream_state;
2025 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2026 stream_state, &stream_update,
2027 stream_state->ctx->dc->current_state);
2028 mutex_unlock(&adev->dm.dc_lock);
2031 static int dm_resume(void *handle)
2033 struct amdgpu_device *adev = handle;
2034 struct drm_device *ddev = adev_to_drm(adev);
2035 struct amdgpu_display_manager *dm = &adev->dm;
2036 struct amdgpu_dm_connector *aconnector;
2037 struct drm_connector *connector;
2038 struct drm_connector_list_iter iter;
2039 struct drm_crtc *crtc;
2040 struct drm_crtc_state *new_crtc_state;
2041 struct dm_crtc_state *dm_new_crtc_state;
2042 struct drm_plane *plane;
2043 struct drm_plane_state *new_plane_state;
2044 struct dm_plane_state *dm_new_plane_state;
2045 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2046 enum dc_connection_type new_connection_type = dc_connection_none;
2047 struct dc_state *dc_state;
2050 if (amdgpu_in_reset(adev)) {
2051 dc_state = dm->cached_dc_state;
2053 r = dm_dmub_hw_init(adev);
2055 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2057 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2060 amdgpu_dm_irq_resume_early(adev);
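/*
 * Presumably because the hardware state was lost across the reset, mark
 * every cached stream and plane as fully changed so the commit below
 * reprograms them from scratch.
 */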
2062 for (i = 0; i < dc_state->stream_count; i++) {
2063 dc_state->streams[i]->mode_changed = true;
2064 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2065 dc_state->stream_status->plane_states[j]->update_flags.raw
2070 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2072 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2074 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2076 dc_release_state(dm->cached_dc_state);
2077 dm->cached_dc_state = NULL;
2079 amdgpu_dm_irq_resume_late(adev);
2081 mutex_unlock(&dm->dc_lock);
2085 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2086 dc_release_state(dm_state->context);
2087 dm_state->context = dc_create_state(dm->dc);
2088 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2089 dc_resource_state_construct(dm->dc, dm_state->context);
2091 /* Before powering on DC we need to re-initialize DMUB. */
2092 r = dm_dmub_hw_init(adev);
2094 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2096 /* power on hardware */
2097 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2099 /* program HPD filter */
2103 * early enable HPD Rx IRQ, should be done before set mode as short
2104 * pulse interrupts are used for MST
2106 amdgpu_dm_irq_resume_early(adev);
2108 /* On resume we need to rewrite the MSTM control bits to enable MST*/
2109 s3_handle_mst(ddev, false);
2112 drm_connector_list_iter_begin(ddev, &iter);
2113 drm_for_each_connector_iter(connector, &iter) {
2114 aconnector = to_amdgpu_dm_connector(connector);
2117 * this is the case when traversing through already created
2118 * MST connectors, should be skipped
2120 if (aconnector->mst_port)
2123 mutex_lock(&aconnector->hpd_lock);
2124 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2125 DRM_ERROR("KMS: Failed to detect connector\n");
2127 if (aconnector->base.force && new_connection_type == dc_connection_none)
2128 emulated_link_detect(aconnector->dc_link);
2130 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2132 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2133 aconnector->fake_enable = false;
2135 if (aconnector->dc_sink)
2136 dc_sink_release(aconnector->dc_sink);
2137 aconnector->dc_sink = NULL;
2138 amdgpu_dm_update_connector_after_detect(aconnector);
2139 mutex_unlock(&aconnector->hpd_lock);
2141 drm_connector_list_iter_end(&iter);
2143 /* Force mode set in atomic commit */
2144 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2145 new_crtc_state->active_changed = true;
2148 * atomic_check is expected to create the dc states. We need to release
2149 * them here, since they were duplicated as part of the suspend procedure.
2152 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2153 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2154 if (dm_new_crtc_state->stream) {
2155 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2156 dc_stream_release(dm_new_crtc_state->stream);
2157 dm_new_crtc_state->stream = NULL;
2161 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2162 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2163 if (dm_new_plane_state->dc_state) {
2164 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2165 dc_plane_state_release(dm_new_plane_state->dc_state);
2166 dm_new_plane_state->dc_state = NULL;
2170 drm_atomic_helper_resume(ddev, dm->cached_state);
2172 dm->cached_state = NULL;
2174 amdgpu_dm_irq_resume_late(adev);
2176 amdgpu_dm_smu_write_watermarks_table(adev);
2184 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2185 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2186 * the base driver's device list to be initialized and torn down accordingly.
2188 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2191 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2193 .early_init = dm_early_init,
2194 .late_init = dm_late_init,
2195 .sw_init = dm_sw_init,
2196 .sw_fini = dm_sw_fini,
2197 .hw_init = dm_hw_init,
2198 .hw_fini = dm_hw_fini,
2199 .suspend = dm_suspend,
2200 .resume = dm_resume,
2201 .is_idle = dm_is_idle,
2202 .wait_for_idle = dm_wait_for_idle,
2203 .check_soft_reset = dm_check_soft_reset,
2204 .soft_reset = dm_soft_reset,
2205 .set_clockgating_state = dm_set_clockgating_state,
2206 .set_powergating_state = dm_set_powergating_state,
2209 const struct amdgpu_ip_block_version dm_ip_block =
2211 .type = AMD_IP_BLOCK_TYPE_DCE,
2215 .funcs = &amdgpu_dm_funcs,
2225 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2226 .fb_create = amdgpu_display_user_framebuffer_create,
2227 .get_format_info = amd_get_format_info,
2228 .output_poll_changed = drm_fb_helper_output_poll_changed,
2229 .atomic_check = amdgpu_dm_atomic_check,
2230 .atomic_commit = drm_atomic_helper_commit,
2233 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2234 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2237 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2239 u32 max_cll, min_cll, max, min, q, r;
2240 struct amdgpu_dm_backlight_caps *caps;
2241 struct amdgpu_display_manager *dm;
2242 struct drm_connector *conn_base;
2243 struct amdgpu_device *adev;
2244 struct dc_link *link = NULL;
2245 static const u8 pre_computed_values[] = {
2246 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2247 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2249 if (!aconnector || !aconnector->dc_link)
2252 link = aconnector->dc_link;
2253 if (link->connector_signal != SIGNAL_TYPE_EDP)
2256 conn_base = &aconnector->base;
2257 adev = drm_to_adev(conn_base->dev);
2259 caps = &dm->backlight_caps;
2260 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2261 caps->aux_support = false;
2262 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2263 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2265 if (caps->ext_caps->bits.oled == 1 ||
2266 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2267 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2268 caps->aux_support = true;
2270 if (amdgpu_backlight == 0)
2271 caps->aux_support = false;
2272 else if (amdgpu_backlight == 1)
2273 caps->aux_support = true;
2275 /* From the specification (CTA-861-G), for calculating the maximum
2276 * luminance we need to use:
2277 * Luminance = 50*2**(CV/32)
2278 * Where CV is a one-byte value.
2279 * For calculating this expression we may need floating-point precision;
2280 * to avoid this complexity level, we take advantage that CV is divided
2281 * by a constant. From Euclid's division algorithm, we know that CV
2282 * can be written as: CV = 32*q + r. Next, we replace CV in the
2283 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2284 * need to pre-compute the value of r/32. For pre-computing the values
2285 * we just used the following Ruby line:
2286 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2287 * The results of the above expressions can be verified at
2288 * pre_computed_values.
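 * Illustrative example (a sanity check of the scheme above): for
 * max_cll = 65 we get q = 65 / 32 = 2 and r = 65 % 32 = 1, so
 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204 nits,
 * which is close to the exact 50 * 2**(65/32) ~= 204.4.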
2292 max = (1 << q) * pre_computed_values[r];
2294 // min luminance: maxLum * (CV/255)^2 / 100
2295 q = DIV_ROUND_CLOSEST(min_cll, 255);
2296 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2298 caps->aux_max_input_signal = max;
2299 caps->aux_min_input_signal = min;
2302 void amdgpu_dm_update_connector_after_detect(
2303 struct amdgpu_dm_connector *aconnector)
2305 struct drm_connector *connector = &aconnector->base;
2306 struct drm_device *dev = connector->dev;
2307 struct dc_sink *sink;
2309 /* MST handled by drm_mst framework */
2310 if (aconnector->mst_mgr.mst_state == true)
2313 sink = aconnector->dc_link->local_sink;
2315 dc_sink_retain(sink);
2318 * Edid mgmt connector gets first update only in mode_valid hook and then
2319 * the connector sink is set to either a fake or a physical sink, depending on link status.
2320 * Skip if already done during boot.
2322 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2323 && aconnector->dc_em_sink) {
2326 * For S3 resume with a headless display, use the emulated sink (dc_em_sink) to fake the stream
2327 * because the connector's dc_sink is set to NULL on resume
2329 mutex_lock(&dev->mode_config.mutex);
2332 if (aconnector->dc_sink) {
2333 amdgpu_dm_update_freesync_caps(connector, NULL);
2335 * retain and release below are used to
2336 * bump up the refcount for the sink because the link doesn't point
2337 * to it anymore after disconnect, so on the next crtc-to-connector
2338 * reshuffle by UMD we would otherwise get an unwanted dc_sink release
2340 dc_sink_release(aconnector->dc_sink);
2342 aconnector->dc_sink = sink;
2343 dc_sink_retain(aconnector->dc_sink);
2344 amdgpu_dm_update_freesync_caps(connector,
2347 amdgpu_dm_update_freesync_caps(connector, NULL);
2348 if (!aconnector->dc_sink) {
2349 aconnector->dc_sink = aconnector->dc_em_sink;
2350 dc_sink_retain(aconnector->dc_sink);
2354 mutex_unlock(&dev->mode_config.mutex);
2357 dc_sink_release(sink);
2362 * TODO: temporary guard to look for a proper fix
2363 * if this sink is an MST sink, we should not do anything
2365 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2366 dc_sink_release(sink);
2370 if (aconnector->dc_sink == sink) {
2372 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2375 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2376 aconnector->connector_id);
2378 dc_sink_release(sink);
2382 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2383 aconnector->connector_id, aconnector->dc_sink, sink);
2385 mutex_lock(&dev->mode_config.mutex);
2388 * 1. Update status of the drm connector
2389 * 2. Send an event and let userspace tell us what to do
2393 * TODO: check if we still need the S3 mode update workaround.
2394 * If yes, put it here.
2396 if (aconnector->dc_sink) {
2397 amdgpu_dm_update_freesync_caps(connector, NULL);
2398 dc_sink_release(aconnector->dc_sink);
2401 aconnector->dc_sink = sink;
2402 dc_sink_retain(aconnector->dc_sink);
2403 if (sink->dc_edid.length == 0) {
2404 aconnector->edid = NULL;
2405 if (aconnector->dc_link->aux_mode) {
2406 drm_dp_cec_unset_edid(
2407 &aconnector->dm_dp_aux.aux);
2411 (struct edid *)sink->dc_edid.raw_edid;
2413 drm_connector_update_edid_property(connector,
2415 if (aconnector->dc_link->aux_mode)
2416 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2420 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2421 update_connector_ext_caps(aconnector);
2423 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2424 amdgpu_dm_update_freesync_caps(connector, NULL);
2425 drm_connector_update_edid_property(connector, NULL);
2426 aconnector->num_modes = 0;
2427 dc_sink_release(aconnector->dc_sink);
2428 aconnector->dc_sink = NULL;
2429 aconnector->edid = NULL;
2430 #ifdef CONFIG_DRM_AMD_DC_HDCP
2431 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2432 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2433 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2437 mutex_unlock(&dev->mode_config.mutex);
2439 update_subconnector_property(aconnector);
2442 dc_sink_release(sink);
2445 static void handle_hpd_irq(void *param)
2447 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2448 struct drm_connector *connector = &aconnector->base;
2449 struct drm_device *dev = connector->dev;
2450 enum dc_connection_type new_connection_type = dc_connection_none;
2451 #ifdef CONFIG_DRM_AMD_DC_HDCP
2452 struct amdgpu_device *adev = drm_to_adev(dev);
2453 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2457 * In case of failure or MST, there is no need to update the connector status or notify the OS,
2458 * since (in the MST case) MST does this in its own context.
2460 mutex_lock(&aconnector->hpd_lock);
2462 #ifdef CONFIG_DRM_AMD_DC_HDCP
2463 if (adev->dm.hdcp_workqueue) {
2464 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2465 dm_con_state->update_hdcp = true;
2468 if (aconnector->fake_enable)
2469 aconnector->fake_enable = false;
2471 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2472 DRM_ERROR("KMS: Failed to detect connector\n");
2474 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2475 emulated_link_detect(aconnector->dc_link);
2478 drm_modeset_lock_all(dev);
2479 dm_restore_drm_connector_state(dev, connector);
2480 drm_modeset_unlock_all(dev);
2482 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2483 drm_kms_helper_hotplug_event(dev);
2485 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2486 if (new_connection_type == dc_connection_none &&
2487 aconnector->dc_link->type == dc_connection_none)
2488 dm_set_dpms_off(aconnector->dc_link);
2490 amdgpu_dm_update_connector_after_detect(aconnector);
2492 drm_modeset_lock_all(dev);
2493 dm_restore_drm_connector_state(dev, connector);
2494 drm_modeset_unlock_all(dev);
2496 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2497 drm_kms_helper_hotplug_event(dev);
2499 mutex_unlock(&aconnector->hpd_lock);
2503 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2505 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2507 bool new_irq_handled = false;
2509 int dpcd_bytes_to_read;
2511 const int max_process_count = 30;
2512 int process_count = 0;
2514 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2516 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2517 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2518 /* DPCD 0x200 - 0x201 for downstream IRQ */
2519 dpcd_addr = DP_SINK_COUNT;
2521 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2522 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2523 dpcd_addr = DP_SINK_COUNT_ESI;
2526 dret = drm_dp_dpcd_read(
2527 &aconnector->dm_dp_aux.aux,
2530 dpcd_bytes_to_read);
2532 while (dret == dpcd_bytes_to_read &&
2533 process_count < max_process_count) {
2539 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2540 /* handle HPD short pulse irq */
2541 if (aconnector->mst_mgr.mst_state)
2543 &aconnector->mst_mgr,
2547 if (new_irq_handled) {
2548 /* ACK at DPCD to notify downstream */
2549 const int ack_dpcd_bytes_to_write =
2550 dpcd_bytes_to_read - 1;
2552 for (retry = 0; retry < 3; retry++) {
2555 wret = drm_dp_dpcd_write(
2556 &aconnector->dm_dp_aux.aux,
2559 ack_dpcd_bytes_to_write);
2560 if (wret == ack_dpcd_bytes_to_write)
2564 /* check if there is new irq to be handled */
2565 dret = drm_dp_dpcd_read(
2566 &aconnector->dm_dp_aux.aux,
2569 dpcd_bytes_to_read);
2571 new_irq_handled = false;
2577 if (process_count == max_process_count)
2578 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2581 static void handle_hpd_rx_irq(void *param)
2583 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2584 struct drm_connector *connector = &aconnector->base;
2585 struct drm_device *dev = connector->dev;
2586 struct dc_link *dc_link = aconnector->dc_link;
2587 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2588 bool result = false;
2589 enum dc_connection_type new_connection_type = dc_connection_none;
2590 struct amdgpu_device *adev = drm_to_adev(dev);
2591 union hpd_irq_data hpd_irq_data;
2593 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2596 * TODO: Temporarily add a mutex so the HPD interrupt does not run into a GPIO
2597 * conflict; once the i2c helper is implemented, this mutex should be retired.
2600 if (dc_link->type != dc_connection_mst_branch)
2601 mutex_lock(&aconnector->hpd_lock);
2603 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2605 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2606 (dc_link->type == dc_connection_mst_branch)) {
2607 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2609 dm_handle_hpd_rx_irq(aconnector);
2611 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2613 dm_handle_hpd_rx_irq(aconnector);
2618 mutex_lock(&adev->dm.dc_lock);
2619 #ifdef CONFIG_DRM_AMD_DC_HDCP
2620 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2622 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2624 mutex_unlock(&adev->dm.dc_lock);
2627 if (result && !is_mst_root_connector) {
2628 /* Downstream Port status changed. */
2629 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2630 DRM_ERROR("KMS: Failed to detect connector\n");
2632 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2633 emulated_link_detect(dc_link);
2635 if (aconnector->fake_enable)
2636 aconnector->fake_enable = false;
2638 amdgpu_dm_update_connector_after_detect(aconnector);
2641 drm_modeset_lock_all(dev);
2642 dm_restore_drm_connector_state(dev, connector);
2643 drm_modeset_unlock_all(dev);
2645 drm_kms_helper_hotplug_event(dev);
2646 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2648 if (aconnector->fake_enable)
2649 aconnector->fake_enable = false;
2651 amdgpu_dm_update_connector_after_detect(aconnector);
2654 drm_modeset_lock_all(dev);
2655 dm_restore_drm_connector_state(dev, connector);
2656 drm_modeset_unlock_all(dev);
2658 drm_kms_helper_hotplug_event(dev);
2661 #ifdef CONFIG_DRM_AMD_DC_HDCP
2662 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2663 if (adev->dm.hdcp_workqueue)
2664 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2668 if (dc_link->type != dc_connection_mst_branch) {
2669 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2670 mutex_unlock(&aconnector->hpd_lock);
2674 static void register_hpd_handlers(struct amdgpu_device *adev)
2676 struct drm_device *dev = adev_to_drm(adev);
2677 struct drm_connector *connector;
2678 struct amdgpu_dm_connector *aconnector;
2679 const struct dc_link *dc_link;
2680 struct dc_interrupt_params int_params = {0};
2682 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2683 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2685 list_for_each_entry(connector,
2686 &dev->mode_config.connector_list, head) {
2688 aconnector = to_amdgpu_dm_connector(connector);
2689 dc_link = aconnector->dc_link;
2691 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2692 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2693 int_params.irq_source = dc_link->irq_source_hpd;
2695 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2697 (void *) aconnector);
2700 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2702 /* Also register for DP short pulse (hpd_rx). */
2703 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2704 int_params.irq_source = dc_link->irq_source_hpd_rx;
2706 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2708 (void *) aconnector);
2713 #if defined(CONFIG_DRM_AMD_DC_SI)
2714 /* Register IRQ sources and initialize IRQ callbacks */
2715 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2717 struct dc *dc = adev->dm.dc;
2718 struct common_irq_params *c_irq_params;
2719 struct dc_interrupt_params int_params = {0};
2722 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2724 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2725 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2728 * Actions of amdgpu_irq_add_id():
2729 * 1. Register a set() function with base driver.
2730 * Base driver will call set() function to enable/disable an
2731 * interrupt in DC hardware.
2732 * 2. Register amdgpu_dm_irq_handler().
2733 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2734 * coming from DC hardware.
2735 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2736 * for acknowledging and handling. */
2738 /* Use VBLANK interrupt */
2739 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2740 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2742 DRM_ERROR("Failed to add crtc irq id!\n");
2746 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2747 int_params.irq_source =
2748 dc_interrupt_to_irq_source(dc, i + 1, 0);
2750 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2752 c_irq_params->adev = adev;
2753 c_irq_params->irq_src = int_params.irq_source;
2755 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2756 dm_crtc_high_irq, c_irq_params);
2759 /* Use GRPH_PFLIP interrupt */
2760 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2761 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2762 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2764 DRM_ERROR("Failed to add page flip irq id!\n");
2768 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2769 int_params.irq_source =
2770 dc_interrupt_to_irq_source(dc, i, 0);
2772 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2774 c_irq_params->adev = adev;
2775 c_irq_params->irq_src = int_params.irq_source;
2777 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2778 dm_pflip_high_irq, c_irq_params);
2783 r = amdgpu_irq_add_id(adev, client_id,
2784 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2786 DRM_ERROR("Failed to add hpd irq id!\n");
2790 register_hpd_handlers(adev);
2796 /* Register IRQ sources and initialize IRQ callbacks */
2797 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2799 struct dc *dc = adev->dm.dc;
2800 struct common_irq_params *c_irq_params;
2801 struct dc_interrupt_params int_params = {0};
2804 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2806 if (adev->asic_type >= CHIP_VEGA10)
2807 client_id = SOC15_IH_CLIENTID_DCE;
2809 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2810 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2813 * Actions of amdgpu_irq_add_id():
2814 * 1. Register a set() function with base driver.
2815 * Base driver will call set() function to enable/disable an
2816 * interrupt in DC hardware.
2817 * 2. Register amdgpu_dm_irq_handler().
2818 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2819 * coming from DC hardware.
2820 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2821 * for acknowledging and handling. */
2823 /* Use VBLANK interrupt */
2824 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2825 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2827 DRM_ERROR("Failed to add crtc irq id!\n");
2831 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2832 int_params.irq_source =
2833 dc_interrupt_to_irq_source(dc, i, 0);
2835 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2837 c_irq_params->adev = adev;
2838 c_irq_params->irq_src = int_params.irq_source;
2840 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2841 dm_crtc_high_irq, c_irq_params);
2844 /* Use VUPDATE interrupt */
2845 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2846 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2848 DRM_ERROR("Failed to add vupdate irq id!\n");
2852 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2853 int_params.irq_source =
2854 dc_interrupt_to_irq_source(dc, i, 0);
2856 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2858 c_irq_params->adev = adev;
2859 c_irq_params->irq_src = int_params.irq_source;
2861 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2862 dm_vupdate_high_irq, c_irq_params);
2865 /* Use GRPH_PFLIP interrupt */
2866 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2867 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2868 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2870 DRM_ERROR("Failed to add page flip irq id!\n");
2874 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2875 int_params.irq_source =
2876 dc_interrupt_to_irq_source(dc, i, 0);
2878 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2880 c_irq_params->adev = adev;
2881 c_irq_params->irq_src = int_params.irq_source;
2883 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2884 dm_pflip_high_irq, c_irq_params);
2889 r = amdgpu_irq_add_id(adev, client_id,
2890 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2892 DRM_ERROR("Failed to add hpd irq id!\n");
2896 register_hpd_handlers(adev);
2901 #if defined(CONFIG_DRM_AMD_DC_DCN)
2902 /* Register IRQ sources and initialize IRQ callbacks */
2903 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2905 struct dc *dc = adev->dm.dc;
2906 struct common_irq_params *c_irq_params;
2907 struct dc_interrupt_params int_params = {0};
2911 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2912 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2915 * Actions of amdgpu_irq_add_id():
2916 * 1. Register a set() function with base driver.
2917 * Base driver will call set() function to enable/disable an
2918 * interrupt in DC hardware.
2919 * 2. Register amdgpu_dm_irq_handler().
2920 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2921 * coming from DC hardware.
2922 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2923 * for acknowledging and handling.
2926 /* Use VSTARTUP interrupt */
2927 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2928 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2930 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2933 DRM_ERROR("Failed to add crtc irq id!\n");
2937 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2938 int_params.irq_source =
2939 dc_interrupt_to_irq_source(dc, i, 0);
2941 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2943 c_irq_params->adev = adev;
2944 c_irq_params->irq_src = int_params.irq_source;
2946 amdgpu_dm_irq_register_interrupt(
2947 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2950 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2951 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2952 * to trigger at end of each vblank, regardless of state of the lock,
2953 * matching DCE behaviour.
2955 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2956 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2958 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2961 DRM_ERROR("Failed to add vupdate irq id!\n");
2965 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2966 int_params.irq_source =
2967 dc_interrupt_to_irq_source(dc, i, 0);
2969 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2971 c_irq_params->adev = adev;
2972 c_irq_params->irq_src = int_params.irq_source;
2974 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2975 dm_vupdate_high_irq, c_irq_params);
2978 /* Use GRPH_PFLIP interrupt */
2979 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2980 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2982 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2984 DRM_ERROR("Failed to add page flip irq id!\n");
2988 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2989 int_params.irq_source =
2990 dc_interrupt_to_irq_source(dc, i, 0);
2992 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2994 c_irq_params->adev = adev;
2995 c_irq_params->irq_src = int_params.irq_source;
2997 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2998 dm_pflip_high_irq, c_irq_params);
3003 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3006 DRM_ERROR("Failed to add hpd irq id!\n");
3010 register_hpd_handlers(adev);
3017 * Acquires the lock for the atomic state object and returns
3018 * the new atomic state.
3020 * This should only be called during atomic check.
3022 static int dm_atomic_get_state(struct drm_atomic_state *state,
3023 struct dm_atomic_state **dm_state)
3025 struct drm_device *dev = state->dev;
3026 struct amdgpu_device *adev = drm_to_adev(dev);
3027 struct amdgpu_display_manager *dm = &adev->dm;
3028 struct drm_private_state *priv_state;
3033 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3034 if (IS_ERR(priv_state))
3035 return PTR_ERR(priv_state);
3037 *dm_state = to_dm_atomic_state(priv_state);
3042 static struct dm_atomic_state *
3043 dm_atomic_get_new_state(struct drm_atomic_state *state)
3045 struct drm_device *dev = state->dev;
3046 struct amdgpu_device *adev = drm_to_adev(dev);
3047 struct amdgpu_display_manager *dm = &adev->dm;
3048 struct drm_private_obj *obj;
3049 struct drm_private_state *new_obj_state;
3052 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3053 if (obj->funcs == dm->atomic_obj.funcs)
3054 return to_dm_atomic_state(new_obj_state);
3060 static struct drm_private_state *
3061 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3063 struct dm_atomic_state *old_state, *new_state;
3065 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3069 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3071 old_state = to_dm_atomic_state(obj->state);
3073 if (old_state && old_state->context)
3074 new_state->context = dc_copy_state(old_state->context);
3076 if (!new_state->context) {
3081 return &new_state->base;
3084 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3085 struct drm_private_state *state)
3087 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3089 if (dm_state && dm_state->context)
3090 dc_release_state(dm_state->context);
3095 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3096 .atomic_duplicate_state = dm_atomic_duplicate_state,
3097 .atomic_destroy_state = dm_atomic_destroy_state,
3100 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3102 struct dm_atomic_state *state;
3105 adev->mode_info.mode_config_initialized = true;
3107 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3108 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3110 adev_to_drm(adev)->mode_config.max_width = 16384;
3111 adev_to_drm(adev)->mode_config.max_height = 16384;
3113 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3114 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3115 /* indicates support for immediate flip */
3116 adev_to_drm(adev)->mode_config.async_page_flip = true;
3118 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3120 state = kzalloc(sizeof(*state), GFP_KERNEL);
3124 state->context = dc_create_state(adev->dm.dc);
3125 if (!state->context) {
3130 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3132 drm_atomic_private_obj_init(adev_to_drm(adev),
3133 &adev->dm.atomic_obj,
3135 &dm_atomic_state_funcs);
3137 r = amdgpu_display_modeset_create_props(adev);
3139 dc_release_state(state->context);
3144 r = amdgpu_dm_audio_init(adev);
3146 dc_release_state(state->context);
3154 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3155 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3156 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3158 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3159 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3161 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3163 #if defined(CONFIG_ACPI)
3164 struct amdgpu_dm_backlight_caps caps;
3166 memset(&caps, 0, sizeof(caps));
3168 if (dm->backlight_caps.caps_valid)
3171 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3172 if (caps.caps_valid) {
3173 dm->backlight_caps.caps_valid = true;
3174 if (caps.aux_support)
3176 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3177 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3179 dm->backlight_caps.min_input_signal =
3180 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3181 dm->backlight_caps.max_input_signal =
3182 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3185 if (dm->backlight_caps.aux_support)
3188 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3189 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3193 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3194 unsigned *min, unsigned *max)
3199 if (caps->aux_support) {
3200 // Firmware limits are in nits, DC API wants millinits.
3201 *max = 1000 * caps->aux_max_input_signal;
3202 *min = 1000 * caps->aux_min_input_signal;
3204 // Firmware limits are 8-bit, PWM control is 16-bit.
3205 *max = 0x101 * caps->max_input_signal;
3206 *min = 0x101 * caps->min_input_signal;
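// Note: multiplying by 0x101 replicates the 8-bit value into both bytes of a
// 16-bit word, e.g. 0xFF * 0x101 = 0xFFFF, so the firmware maximum maps to
// the full PWM range.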
3211 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3212 uint32_t brightness)
3216 if (!get_brightness_range(caps, &min, &max))
3219 // Rescale 0..255 to min..max
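// Worked example (illustrative, assuming the driver defaults of
// min_input_signal = 12 and max_input_signal = 255 in PWM mode, with
// AMDGPU_MAX_BL_LEVEL == 255): min = 3084, max = 65535, so a requested
// brightness of 128 maps to 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432.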
3220 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3221 AMDGPU_MAX_BL_LEVEL);
3224 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3225 uint32_t brightness)
3229 if (!get_brightness_range(caps, &min, &max))
3232 if (brightness < min)
3234 // Rescale min..max to 0..255
3235 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3239 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3241 struct amdgpu_display_manager *dm = bl_get_data(bd);
3242 struct amdgpu_dm_backlight_caps caps;
3243 struct dc_link *link = NULL;
3247 amdgpu_dm_update_backlight_caps(dm);
3248 caps = dm->backlight_caps;
3250 link = (struct dc_link *)dm->backlight_link;
3252 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3253 // Change brightness based on AUX property
3254 if (caps.aux_support)
3255 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3256 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3258 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3263 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3265 struct amdgpu_display_manager *dm = bl_get_data(bd);
3266 struct amdgpu_dm_backlight_caps caps;
3268 amdgpu_dm_update_backlight_caps(dm);
3269 caps = dm->backlight_caps;
3271 if (caps.aux_support) {
3272 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3276 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3278 return bd->props.brightness;
3279 return convert_brightness_to_user(&caps, avg);
3281 int ret = dc_link_get_backlight_level(dm->backlight_link);
3283 if (ret == DC_ERROR_UNEXPECTED)
3284 return bd->props.brightness;
3285 return convert_brightness_to_user(&caps, ret);
3289 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3290 .options = BL_CORE_SUSPENDRESUME,
3291 .get_brightness = amdgpu_dm_backlight_get_brightness,
3292 .update_status = amdgpu_dm_backlight_update_status,
3296 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3299 struct backlight_properties props = { 0 };
3301 amdgpu_dm_update_backlight_caps(dm);
3303 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3304 props.brightness = AMDGPU_MAX_BL_LEVEL;
3305 props.type = BACKLIGHT_RAW;
3307 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3308 adev_to_drm(dm->adev)->primary->index);
3310 dm->backlight_dev = backlight_device_register(bl_name,
3311 adev_to_drm(dm->adev)->dev,
3313 &amdgpu_dm_backlight_ops,
3316 if (IS_ERR(dm->backlight_dev))
3317 DRM_ERROR("DM: Backlight registration failed!\n");
3319 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3324 static int initialize_plane(struct amdgpu_display_manager *dm,
3325 struct amdgpu_mode_info *mode_info, int plane_id,
3326 enum drm_plane_type plane_type,
3327 const struct dc_plane_cap *plane_cap)
3329 struct drm_plane *plane;
3330 unsigned long possible_crtcs;
3333 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3335 DRM_ERROR("KMS: Failed to allocate plane\n");
3338 plane->type = plane_type;
3341 * HACK: IGT tests expect that the primary plane for a CRTC
3342 * can only have one possible CRTC. Only expose support for
3343 * any CRTC if they're not going to be used as a primary plane
3344 * for a CRTC - like overlay or underlay planes.
3346 possible_crtcs = 1 << plane_id;
3347 if (plane_id >= dm->dc->caps.max_streams)
3348 possible_crtcs = 0xff;
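/*
 * Illustrative example (assuming max_streams == 4): primary plane 0 gets
 * possible_crtcs = 0x1 (CRTC 0 only) and plane 3 gets 0x8, while an overlay
 * plane with plane_id >= 4 falls through to 0xff (any CRTC).
 */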
3350 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3353 DRM_ERROR("KMS: Failed to initialize plane\n");
3359 mode_info->planes[plane_id] = plane;
3365 static void register_backlight_device(struct amdgpu_display_manager *dm,
3366 struct dc_link *link)
3368 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3369 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3371 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3372 link->type != dc_connection_none) {
3374 * Even if registration failed, we should continue with
3375 * DM initialization because not having a backlight control
3376 * is better than a black screen.
3378 amdgpu_dm_register_backlight_device(dm);
3380 if (dm->backlight_dev)
3381 dm->backlight_link = link;
3388 * In this architecture, the association
3389 * connector -> encoder -> crtc
3390 * is not really required. The crtc and connector will hold the
3391 * display_index as an abstraction to use with the DAL component
3393 * Returns 0 on success
3395 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3397 struct amdgpu_display_manager *dm = &adev->dm;
3399 struct amdgpu_dm_connector *aconnector = NULL;
3400 struct amdgpu_encoder *aencoder = NULL;
3401 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3403 int32_t primary_planes;
3404 enum dc_connection_type new_connection_type = dc_connection_none;
3405 const struct dc_plane_cap *plane;
3407 dm->display_indexes_num = dm->dc->caps.max_streams;
3408 /* Update the actual number of crtcs used */
3409 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3411 link_cnt = dm->dc->caps.max_links;
3412 if (amdgpu_dm_mode_config_init(dm->adev)) {
3413 DRM_ERROR("DM: Failed to initialize mode config\n");
3417 /* There is one primary plane per CRTC */
3418 primary_planes = dm->dc->caps.max_streams;
3419 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3422 * Initialize primary planes, implicit planes for legacy IOCTLS.
3423 * Order is reversed to match iteration order in atomic check.
3425 for (i = (primary_planes - 1); i >= 0; i--) {
3426 plane = &dm->dc->caps.planes[i];
3428 if (initialize_plane(dm, mode_info, i,
3429 DRM_PLANE_TYPE_PRIMARY, plane)) {
3430 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3436 * Initialize overlay planes, index starting after primary planes.
3437 * These planes have a higher DRM index than the primary planes since
3438 * they should be considered as having a higher z-order.
3439 * Order is reversed to match iteration order in atomic check.
3441 * Only support DCN for now, and only expose one so we don't encourage
3442 * userspace to use up all the pipes.
3444 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3445 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3447 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3450 if (!plane->blends_with_above || !plane->blends_with_below)
3453 if (!plane->pixel_format_support.argb8888)
3456 if (initialize_plane(dm, NULL, primary_planes + i,
3457 DRM_PLANE_TYPE_OVERLAY, plane)) {
3458 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3462 /* Only create one overlay plane. */
3466 for (i = 0; i < dm->dc->caps.max_streams; i++)
3467 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3468 DRM_ERROR("KMS: Failed to initialize crtc\n");
3472 /* loops over all connectors on the board */
3473 for (i = 0; i < link_cnt; i++) {
3474 struct dc_link *link = NULL;
3476 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3478 "KMS: Cannot support more than %d display indexes\n",
3479 AMDGPU_DM_MAX_DISPLAY_INDEX);
3483 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3487 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3491 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3492 DRM_ERROR("KMS: Failed to initialize encoder\n");
3496 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3497 DRM_ERROR("KMS: Failed to initialize connector\n");
3501 link = dc_get_link_at_index(dm->dc, i);
3503 if (!dc_link_detect_sink(link, &new_connection_type))
3504 DRM_ERROR("KMS: Failed to detect connector\n");
3506 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3507 emulated_link_detect(link);
3508 amdgpu_dm_update_connector_after_detect(aconnector);
3510 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3511 amdgpu_dm_update_connector_after_detect(aconnector);
3512 register_backlight_device(dm, link);
3513 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3514 amdgpu_dm_set_psr_caps(link);
3520 /* Software is initialized. Now we can register interrupt handlers. */
3521 switch (adev->asic_type) {
3522 #if defined(CONFIG_DRM_AMD_DC_SI)
3527 if (dce60_register_irq_handlers(dm->adev)) {
3528 DRM_ERROR("DM: Failed to initialize IRQ\n");
3542 case CHIP_POLARIS11:
3543 case CHIP_POLARIS10:
3544 case CHIP_POLARIS12:
3549 if (dce110_register_irq_handlers(dm->adev)) {
3550 DRM_ERROR("DM: Failed to initialize IRQ\n");
3554 #if defined(CONFIG_DRM_AMD_DC_DCN)
3560 case CHIP_SIENNA_CICHLID:
3561 case CHIP_NAVY_FLOUNDER:
3562 case CHIP_DIMGREY_CAVEFISH:
3564 if (dcn10_register_irq_handlers(dm->adev)) {
3565 DRM_ERROR("DM: Failed to initialize IRQ\n");
3571 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3583 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3585 drm_mode_config_cleanup(dm->ddev);
3586 drm_atomic_private_obj_fini(&dm->atomic_obj);
3590 /******************************************************************************
3591 * amdgpu_display_funcs functions
3592 *****************************************************************************/
3595 * dm_bandwidth_update - program display watermarks
3597 * @adev: amdgpu_device pointer
3599 * Calculate and program the display watermarks and line buffer allocation.
3601 static void dm_bandwidth_update(struct amdgpu_device *adev)
3603 /* TODO: implement later */
3606 static const struct amdgpu_display_funcs dm_display_funcs = {
3607 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3608 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3609 .backlight_set_level = NULL, /* never called for DC */
3610 .backlight_get_level = NULL, /* never called for DC */
3611 .hpd_sense = NULL,/* called unconditionally */
3612 .hpd_set_polarity = NULL, /* called unconditionally */
3613 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3614 .page_flip_get_scanoutpos =
3615 dm_crtc_get_scanoutpos,/* called unconditionally */
3616 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3617 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3620 #if defined(CONFIG_DEBUG_KERNEL_DC)
3622 static ssize_t s3_debug_store(struct device *device,
3623 struct device_attribute *attr,
3629 struct drm_device *drm_dev = dev_get_drvdata(device);
3630 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3632 ret = kstrtoint(buf, 0, &s3_state);
3637 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3642 return ret == 0 ? count : 0;
3645 DEVICE_ATTR_WO(s3_debug);
3649 static int dm_early_init(void *handle)
3651 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3653 switch (adev->asic_type) {
3654 #if defined(CONFIG_DRM_AMD_DC_SI)
3658 adev->mode_info.num_crtc = 6;
3659 adev->mode_info.num_hpd = 6;
3660 adev->mode_info.num_dig = 6;
3663 adev->mode_info.num_crtc = 2;
3664 adev->mode_info.num_hpd = 2;
3665 adev->mode_info.num_dig = 2;
3670 adev->mode_info.num_crtc = 6;
3671 adev->mode_info.num_hpd = 6;
3672 adev->mode_info.num_dig = 6;
3675 adev->mode_info.num_crtc = 4;
3676 adev->mode_info.num_hpd = 6;
3677 adev->mode_info.num_dig = 7;
3681 adev->mode_info.num_crtc = 2;
3682 adev->mode_info.num_hpd = 6;
3683 adev->mode_info.num_dig = 6;
3687 adev->mode_info.num_crtc = 6;
3688 adev->mode_info.num_hpd = 6;
3689 adev->mode_info.num_dig = 7;
3692 adev->mode_info.num_crtc = 3;
3693 adev->mode_info.num_hpd = 6;
3694 adev->mode_info.num_dig = 9;
3697 adev->mode_info.num_crtc = 2;
3698 adev->mode_info.num_hpd = 6;
3699 adev->mode_info.num_dig = 9;
3701 case CHIP_POLARIS11:
3702 case CHIP_POLARIS12:
3703 adev->mode_info.num_crtc = 5;
3704 adev->mode_info.num_hpd = 5;
3705 adev->mode_info.num_dig = 5;
3707 case CHIP_POLARIS10:
3709 adev->mode_info.num_crtc = 6;
3710 adev->mode_info.num_hpd = 6;
3711 adev->mode_info.num_dig = 6;
3716 adev->mode_info.num_crtc = 6;
3717 adev->mode_info.num_hpd = 6;
3718 adev->mode_info.num_dig = 6;
3720 #if defined(CONFIG_DRM_AMD_DC_DCN)
3724 adev->mode_info.num_crtc = 4;
3725 adev->mode_info.num_hpd = 4;
3726 adev->mode_info.num_dig = 4;
3730 case CHIP_SIENNA_CICHLID:
3731 case CHIP_NAVY_FLOUNDER:
3732 adev->mode_info.num_crtc = 6;
3733 adev->mode_info.num_hpd = 6;
3734 adev->mode_info.num_dig = 6;
3737 case CHIP_DIMGREY_CAVEFISH:
3738 adev->mode_info.num_crtc = 5;
3739 adev->mode_info.num_hpd = 5;
3740 adev->mode_info.num_dig = 5;
3744 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3748 amdgpu_dm_set_irq_funcs(adev);
3750 if (adev->mode_info.funcs == NULL)
3751 adev->mode_info.funcs = &dm_display_funcs;
3754 * Note: Do NOT change adev->audio_endpt_rreg and
3755 * adev->audio_endpt_wreg because they are initialised in
3756 * amdgpu_device_init()
3758 #if defined(CONFIG_DEBUG_KERNEL_DC)
3760 adev_to_drm(adev)->dev,
3761 &dev_attr_s3_debug);
3767 static bool modeset_required(struct drm_crtc_state *crtc_state,
3768 struct dc_stream_state *new_stream,
3769 struct dc_stream_state *old_stream)
3771 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3774 static bool modereset_required(struct drm_crtc_state *crtc_state)
3776 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3779 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3781 drm_encoder_cleanup(encoder);
3785 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3786 .destroy = amdgpu_dm_encoder_destroy,
3790 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3791 struct drm_framebuffer *fb,
3792 int *min_downscale, int *max_upscale)
3794 struct amdgpu_device *adev = drm_to_adev(dev);
3795 struct dc *dc = adev->dm.dc;
3796 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3797 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3799 switch (fb->format->format) {
3800 case DRM_FORMAT_P010:
3801 case DRM_FORMAT_NV12:
3802 case DRM_FORMAT_NV21:
3803 *max_upscale = plane_cap->max_upscale_factor.nv12;
3804 *min_downscale = plane_cap->max_downscale_factor.nv12;
3807 case DRM_FORMAT_XRGB16161616F:
3808 case DRM_FORMAT_ARGB16161616F:
3809 case DRM_FORMAT_XBGR16161616F:
3810 case DRM_FORMAT_ABGR16161616F:
3811 *max_upscale = plane_cap->max_upscale_factor.fp16;
3812 *min_downscale = plane_cap->max_downscale_factor.fp16;
3816 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3817 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3822 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
3823 * scaling factor of 1.0 == 1000 units.
3825 if (*max_upscale == 1)
3826 *max_upscale = 1000;
3828 if (*min_downscale == 1)
3829 *min_downscale = 1000;
3833 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3834 struct dc_scaling_info *scaling_info)
3836 int scale_w, scale_h, min_downscale, max_upscale;
3838 memset(scaling_info, 0, sizeof(*scaling_info));
3840 /* Source is fixed 16.16 but we ignore mantissa for now... */
3841 scaling_info->src_rect.x = state->src_x >> 16;
3842 scaling_info->src_rect.y = state->src_y >> 16;
3844 scaling_info->src_rect.width = state->src_w >> 16;
3845 if (scaling_info->src_rect.width == 0)
3848 scaling_info->src_rect.height = state->src_h >> 16;
3849 if (scaling_info->src_rect.height == 0)
3852 scaling_info->dst_rect.x = state->crtc_x;
3853 scaling_info->dst_rect.y = state->crtc_y;
3855 if (state->crtc_w == 0)
3858 scaling_info->dst_rect.width = state->crtc_w;
3860 if (state->crtc_h == 0)
3863 scaling_info->dst_rect.height = state->crtc_h;
3865 /* DRM doesn't specify clipping on destination output. */
3866 scaling_info->clip_rect = scaling_info->dst_rect;
3868 /* Validate scaling per-format with DC plane caps */
3869 if (state->plane && state->plane->dev && state->fb) {
3870 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3871 &min_downscale, &max_upscale);
3873 min_downscale = 250;
3874 max_upscale = 16000;
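/*
 * These limits are in 1/1000ths of the scaling ratio: the fallback of
 * 250/16000 allows anything from shrinking to 1/4 of the source size up to
 * 16x upscaling. For example, a 1920-wide source scaled to a 960-wide
 * destination gives scale_w = 960 * 1000 / 1920 = 500, which passes the
 * check below.
 */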
3877 scale_w = scaling_info->dst_rect.width * 1000 /
3878 scaling_info->src_rect.width;
3880 if (scale_w < min_downscale || scale_w > max_upscale)
3883 scale_h = scaling_info->dst_rect.height * 1000 /
3884 scaling_info->src_rect.height;
3886 if (scale_h < min_downscale || scale_h > max_upscale)
3890 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3891 * assume reasonable defaults based on the format.
3898 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3899 uint64_t tiling_flags)
3901 /* Fill GFX8 params */
3902 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3903 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3905 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3906 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3907 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3908 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3909 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3911 /* XXX fix me for VI */
3912 tiling_info->gfx8.num_banks = num_banks;
3913 tiling_info->gfx8.array_mode =
3914 DC_ARRAY_2D_TILED_THIN1;
3915 tiling_info->gfx8.tile_split = tile_split;
3916 tiling_info->gfx8.bank_width = bankw;
3917 tiling_info->gfx8.bank_height = bankh;
3918 tiling_info->gfx8.tile_aspect = mtaspect;
3919 tiling_info->gfx8.tile_mode =
3920 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3921 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3922 == DC_ARRAY_1D_TILED_THIN1) {
3923 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3926 tiling_info->gfx8.pipe_config =
3927 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3931 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3932 union dc_tiling_info *tiling_info)
3934 tiling_info->gfx9.num_pipes =
3935 adev->gfx.config.gb_addr_config_fields.num_pipes;
3936 tiling_info->gfx9.num_banks =
3937 adev->gfx.config.gb_addr_config_fields.num_banks;
3938 tiling_info->gfx9.pipe_interleave =
3939 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3940 tiling_info->gfx9.num_shader_engines =
3941 adev->gfx.config.gb_addr_config_fields.num_se;
3942 tiling_info->gfx9.max_compressed_frags =
3943 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3944 tiling_info->gfx9.num_rb_per_se =
3945 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3946 tiling_info->gfx9.shaderEnable = 1;
3947 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3948 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3949 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3950 adev->asic_type == CHIP_VANGOGH)
3951 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3955 validate_dcc(struct amdgpu_device *adev,
3956 const enum surface_pixel_format format,
3957 const enum dc_rotation_angle rotation,
3958 const union dc_tiling_info *tiling_info,
3959 const struct dc_plane_dcc_param *dcc,
3960 const struct dc_plane_address *address,
3961 const struct plane_size *plane_size)
3963 struct dc *dc = adev->dm.dc;
3964 struct dc_dcc_surface_param input;
3965 struct dc_surface_dcc_cap output;
3967 memset(&input, 0, sizeof(input));
3968 memset(&output, 0, sizeof(output));
3973 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3974 !dc->cap_funcs.get_dcc_compression_cap)
3977 input.format = format;
3978 input.surface_size.width = plane_size->surface_size.width;
3979 input.surface_size.height = plane_size->surface_size.height;
3980 input.swizzle_mode = tiling_info->gfx9.swizzle;
3982 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3983 input.scan = SCAN_DIRECTION_HORIZONTAL;
3984 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3985 input.scan = SCAN_DIRECTION_VERTICAL;
3987 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3990 if (!output.capable)
3993 if (dcc->independent_64b_blks == 0 &&
3994 output.grph.rgb.independent_64b_blks != 0)
4001 modifier_has_dcc(uint64_t modifier)
4003 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4007 modifier_gfx9_swizzle_mode(uint64_t modifier)
4009 if (modifier == DRM_FORMAT_MOD_LINEAR)
4012 return AMD_FMT_MOD_GET(TILE, modifier);
4015 static const struct drm_format_info *
4016 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4018 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4022 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4023 union dc_tiling_info *tiling_info,
4026 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4027 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4028 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4029 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4031 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4033 if (!IS_AMD_FMT_MOD(modifier))
4036 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4037 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4039 if (adev->family >= AMDGPU_FAMILY_NV) {
4040 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4042 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
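/*
 * For instance, a hypothetical modifier with PIPE_XOR_BITS = 3 and
 * BANK_XOR_BITS = 2 decodes to num_pipes = 1 << min(4, 3) = 8,
 * num_shader_engines = 1 << (3 - 3) = 1 and, on pre-Navi parts,
 * num_banks = 1 << 2 = 4.
 */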
4044 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4048 enum dm_micro_swizzle {
4049 MICRO_SWIZZLE_Z = 0,
4050 MICRO_SWIZZLE_S = 1,
4051 MICRO_SWIZZLE_D = 2,
4055 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4059 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4060 const struct drm_format_info *info = drm_format_info(format);
4062 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
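/*
 * The low two bits of the GFX9+ swizzle mode select the micro-tile flavour,
 * which is what the dm_micro_swizzle enum above models (Z/S/D/R), so masking
 * with 3 is enough to classify the modifier here.
 */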
4068 * We always have to allow this modifier, because core DRM still
4069 * checks LINEAR support if userspace does not provide modifiers.
4071 if (modifier == DRM_FORMAT_MOD_LINEAR)
4075 * The arbitrary tiling support for multiplane formats has not been hooked up yet.
4078 if (info->num_planes > 1)
4082 * For D swizzle the canonical modifier depends on the bpp, so check
4085 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4086 adev->family >= AMDGPU_FAMILY_NV) {
4087 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4091 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4095 if (modifier_has_dcc(modifier)) {
4096 /* Per radeonsi comments 16/64 bpp are more complicated. */
4097 if (info->cpp[0] != 4)
4105 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4110 if (*cap - *size < 1) {
4111 uint64_t new_cap = *cap * 2;
4112 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4120 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4126 (*mods)[*size] = mod;
4131 add_gfx9_modifiers(const struct amdgpu_device *adev,
4132 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4134 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4135 int pipe_xor_bits = min(8, pipes +
4136 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4137 int bank_xor_bits = min(8 - pipe_xor_bits,
4138 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4139 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4140 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
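/*
 * Rough example (assumed config, not taken from a real ASIC table): with
 * 4 pipes, 1 shader engine and 8 banks this gives
 * pipe_xor_bits = min(8, 2 + 0) = 2, bank_xor_bits = min(8 - 2, 3) = 3 and
 * rb = 0 + ilog2(num_rb_per_se).
 */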
4143 if (adev->family == AMDGPU_FAMILY_RV) {
4144 /* Raven2 and later */
4145 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4148 * No _D DCC swizzles yet because we only allow 32bpp, which
4149 * doesn't support _D on DCN
4152 if (has_constant_encode) {
4153 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4154 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4155 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4156 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4157 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4158 AMD_FMT_MOD_SET(DCC, 1) |
4159 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4160 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4161 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4164 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4165 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4166 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4167 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4168 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4169 AMD_FMT_MOD_SET(DCC, 1) |
4170 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4171 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4172 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4174 if (has_constant_encode) {
4175 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4176 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4177 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4178 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4179 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4180 AMD_FMT_MOD_SET(DCC, 1) |
4181 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4182 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4183 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4185 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4186 AMD_FMT_MOD_SET(RB, rb) |
4187 AMD_FMT_MOD_SET(PIPE, pipes));
4190 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4191 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4192 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4193 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4194 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4195 AMD_FMT_MOD_SET(DCC, 1) |
4196 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4197 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4198 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4199 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4200 AMD_FMT_MOD_SET(RB, rb) |
4201 AMD_FMT_MOD_SET(PIPE, pipes));
4205 * Only supported for 64bpp on Raven, will be filtered on format in
4206 * dm_plane_format_mod_supported.
4208 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4209 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4210 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4211 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4212 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4214 if (adev->family == AMDGPU_FAMILY_RV) {
4215 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4216 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4217 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4218 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4219 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4223 * Only supported for 64bpp on Raven, will be filtered on format in
4224 * dm_plane_format_mod_supported.
4226 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4227 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4228 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4230 if (adev->family == AMDGPU_FAMILY_RV) {
4231 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4232 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4233 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4238 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4239 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4241 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4243 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4244 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4245 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4246 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4247 AMD_FMT_MOD_SET(DCC, 1) |
4248 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4249 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4250 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4252 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4253 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4254 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4255 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4256 AMD_FMT_MOD_SET(DCC, 1) |
4257 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4258 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4259 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4260 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4262 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4263 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4264 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4265 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4267 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4268 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4269 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4270 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4273 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4274 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4275 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4276 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4278 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4279 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4280 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4284 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4285 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4287 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4288 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
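/* GFX10.3 (RB+) modifiers also encode the packer count, not just the pipe XOR bits. */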
4290 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4291 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4292 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4293 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4294 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4295 AMD_FMT_MOD_SET(DCC, 1) |
4296 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4297 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4298 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4299 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4301 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4302 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4303 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4304 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4305 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4306 AMD_FMT_MOD_SET(DCC, 1) |
4307 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4308 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4309 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4310 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4311 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4313 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4314 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4315 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4316 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4317 AMD_FMT_MOD_SET(PACKERS, pkrs));
4319 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4320 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4321 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4322 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4323 AMD_FMT_MOD_SET(PACKERS, pkrs));
4325 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4326 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4327 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4328 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4330 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4331 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4332 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4336 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4338 uint64_t size = 0, capacity = 128;
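/* Start with room for 128 modifiers; add_modifier() grows the array if needed. */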
4341 /* We have not hooked up any pre-GFX9 modifiers. */
4342 if (adev->family < AMDGPU_FAMILY_AI)
4345 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4347 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
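/* Cursor planes only advertise LINEAR; INVALID terminates the list. */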
4348 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4349 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4350 return *mods ? 0 : -ENOMEM;
4353 switch (adev->family) {
4354 case AMDGPU_FAMILY_AI:
4355 case AMDGPU_FAMILY_RV:
4356 add_gfx9_modifiers(adev, mods, &size, &capacity);
4358 case AMDGPU_FAMILY_NV:
4359 case AMDGPU_FAMILY_VGH:
4360 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4361 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4363 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4367 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4369 /* INVALID marks the end of the list. */
4370 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4379 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4380 const struct amdgpu_framebuffer *afb,
4381 const enum surface_pixel_format format,
4382 const enum dc_rotation_angle rotation,
4383 const struct plane_size *plane_size,
4384 union dc_tiling_info *tiling_info,
4385 struct dc_plane_dcc_param *dcc,
4386 struct dc_plane_address *address,
4387 const bool force_disable_dcc)
4389 const uint64_t modifier = afb->base.modifier;
4392 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4393 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4395 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
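/* The DCC metadata surface is described by fb plane 1 (offsets[1]/pitches[1]). */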
4396 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4399 dcc->meta_pitch = afb->base.pitches[1];
4400 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4402 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4403 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4406 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4414 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4415 const struct amdgpu_framebuffer *afb,
4416 const enum surface_pixel_format format,
4417 const enum dc_rotation_angle rotation,
4418 const uint64_t tiling_flags,
4419 union dc_tiling_info *tiling_info,
4420 struct plane_size *plane_size,
4421 struct dc_plane_dcc_param *dcc,
4422 struct dc_plane_address *address,
4424 bool force_disable_dcc)
4426 const struct drm_framebuffer *fb = &afb->base;
4429 memset(tiling_info, 0, sizeof(*tiling_info));
4430 memset(plane_size, 0, sizeof(*plane_size));
4431 memset(dcc, 0, sizeof(*dcc));
4432 memset(address, 0, sizeof(*address));
4434 address->tmz_surface = tmz_surface;
4436 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4437 uint64_t addr = afb->address + fb->offsets[0];
4439 plane_size->surface_size.x = 0;
4440 plane_size->surface_size.y = 0;
4441 plane_size->surface_size.width = fb->width;
4442 plane_size->surface_size.height = fb->height;
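/* fb->pitches[] is in bytes; DC wants the pitch in pixels, hence the cpp divide. */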
4443 plane_size->surface_pitch =
4444 fb->pitches[0] / fb->format->cpp[0];
4446 address->type = PLN_ADDR_TYPE_GRAPHICS;
4447 address->grph.addr.low_part = lower_32_bits(addr);
4448 address->grph.addr.high_part = upper_32_bits(addr);
4449 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4450 uint64_t luma_addr = afb->address + fb->offsets[0];
4451 uint64_t chroma_addr = afb->address + fb->offsets[1];
4453 plane_size->surface_size.x = 0;
4454 plane_size->surface_size.y = 0;
4455 plane_size->surface_size.width = fb->width;
4456 plane_size->surface_size.height = fb->height;
4457 plane_size->surface_pitch =
4458 fb->pitches[0] / fb->format->cpp[0];
4460 plane_size->chroma_size.x = 0;
4461 plane_size->chroma_size.y = 0;
4462 /* TODO: set these based on surface format */
4463 plane_size->chroma_size.width = fb->width / 2;
4464 plane_size->chroma_size.height = fb->height / 2;
4466 plane_size->chroma_pitch =
4467 fb->pitches[1] / fb->format->cpp[1];
4469 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4470 address->video_progressive.luma_addr.low_part =
4471 lower_32_bits(luma_addr);
4472 address->video_progressive.luma_addr.high_part =
4473 upper_32_bits(luma_addr);
4474 address->video_progressive.chroma_addr.low_part =
4475 lower_32_bits(chroma_addr);
4476 address->video_progressive.chroma_addr.high_part =
4477 upper_32_bits(chroma_addr);
4480 if (adev->family >= AMDGPU_FAMILY_AI) {
4481 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4482 rotation, plane_size,
4489 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4496 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4497 bool *per_pixel_alpha, bool *global_alpha,
4498 int *global_alpha_value)
4500 *per_pixel_alpha = false;
4501 *global_alpha = false;
4502 *global_alpha_value = 0xff;
4504 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4507 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4508 static const uint32_t alpha_formats[] = {
4509 DRM_FORMAT_ARGB8888,
4510 DRM_FORMAT_RGBA8888,
4511 DRM_FORMAT_ABGR8888,
4513 uint32_t format = plane_state->fb->format->format;
4516 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4517 if (format == alpha_formats[i]) {
4518 *per_pixel_alpha = true;
4524 if (plane_state->alpha < 0xffff) {
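/* DRM plane alpha is 16 bit (0xffff == opaque); DC takes an 8 bit global alpha. */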
4525 *global_alpha = true;
4526 *global_alpha_value = plane_state->alpha >> 8;
4531 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4532 const enum surface_pixel_format format,
4533 enum dc_color_space *color_space)
4537 *color_space = COLOR_SPACE_SRGB;
4539 /* DRM color properties only affect non-RGB formats. */
4540 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4543 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4545 switch (plane_state->color_encoding) {
4546 case DRM_COLOR_YCBCR_BT601:
4548 *color_space = COLOR_SPACE_YCBCR601;
4550 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4553 case DRM_COLOR_YCBCR_BT709:
4555 *color_space = COLOR_SPACE_YCBCR709;
4557 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4560 case DRM_COLOR_YCBCR_BT2020:
4562 *color_space = COLOR_SPACE_2020_YCBCR;
4575 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4576 const struct drm_plane_state *plane_state,
4577 const uint64_t tiling_flags,
4578 struct dc_plane_info *plane_info,
4579 struct dc_plane_address *address,
4581 bool force_disable_dcc)
4583 const struct drm_framebuffer *fb = plane_state->fb;
4584 const struct amdgpu_framebuffer *afb =
4585 to_amdgpu_framebuffer(plane_state->fb);
4586 struct drm_format_name_buf format_name;
4589 memset(plane_info, 0, sizeof(*plane_info));
4591 switch (fb->format->format) {
4593 plane_info->format =
4594 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4596 case DRM_FORMAT_RGB565:
4597 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4599 case DRM_FORMAT_XRGB8888:
4600 case DRM_FORMAT_ARGB8888:
4601 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4603 case DRM_FORMAT_XRGB2101010:
4604 case DRM_FORMAT_ARGB2101010:
4605 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4607 case DRM_FORMAT_XBGR2101010:
4608 case DRM_FORMAT_ABGR2101010:
4609 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4611 case DRM_FORMAT_XBGR8888:
4612 case DRM_FORMAT_ABGR8888:
4613 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4615 case DRM_FORMAT_NV21:
4616 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4618 case DRM_FORMAT_NV12:
4619 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4621 case DRM_FORMAT_P010:
4622 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4624 case DRM_FORMAT_XRGB16161616F:
4625 case DRM_FORMAT_ARGB16161616F:
4626 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4628 case DRM_FORMAT_XBGR16161616F:
4629 case DRM_FORMAT_ABGR16161616F:
4630 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4634 "Unsupported screen format %s\n",
4635 drm_get_format_name(fb->format->format, &format_name));
4639 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4640 case DRM_MODE_ROTATE_0:
4641 plane_info->rotation = ROTATION_ANGLE_0;
4643 case DRM_MODE_ROTATE_90:
4644 plane_info->rotation = ROTATION_ANGLE_90;
4646 case DRM_MODE_ROTATE_180:
4647 plane_info->rotation = ROTATION_ANGLE_180;
4649 case DRM_MODE_ROTATE_270:
4650 plane_info->rotation = ROTATION_ANGLE_270;
4653 plane_info->rotation = ROTATION_ANGLE_0;
4657 plane_info->visible = true;
4658 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4660 plane_info->layer_index = 0;
4662 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4663 &plane_info->color_space);
4667 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4668 plane_info->rotation, tiling_flags,
4669 &plane_info->tiling_info,
4670 &plane_info->plane_size,
4671 &plane_info->dcc, address, tmz_surface,
4676 fill_blending_from_plane_state(
4677 plane_state, &plane_info->per_pixel_alpha,
4678 &plane_info->global_alpha, &plane_info->global_alpha_value);
4683 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4684 struct dc_plane_state *dc_plane_state,
4685 struct drm_plane_state *plane_state,
4686 struct drm_crtc_state *crtc_state)
4688 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4689 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4690 struct dc_scaling_info scaling_info;
4691 struct dc_plane_info plane_info;
4693 bool force_disable_dcc = false;
4695 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4699 dc_plane_state->src_rect = scaling_info.src_rect;
4700 dc_plane_state->dst_rect = scaling_info.dst_rect;
4701 dc_plane_state->clip_rect = scaling_info.clip_rect;
4702 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4704 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4705 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4708 &dc_plane_state->address,
4714 dc_plane_state->format = plane_info.format;
4715 dc_plane_state->color_space = plane_info.color_space;
4717 dc_plane_state->plane_size = plane_info.plane_size;
4718 dc_plane_state->rotation = plane_info.rotation;
4719 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4720 dc_plane_state->stereo_format = plane_info.stereo_format;
4721 dc_plane_state->tiling_info = plane_info.tiling_info;
4722 dc_plane_state->visible = plane_info.visible;
4723 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4724 dc_plane_state->global_alpha = plane_info.global_alpha;
4725 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4726 dc_plane_state->dcc = plane_info.dcc;
4727 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4728 dc_plane_state->flip_int_enabled = true;
4731 * Always set input transfer function, since plane state is refreshed
4734 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4741 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4742 const struct dm_connector_state *dm_state,
4743 struct dc_stream_state *stream)
4745 enum amdgpu_rmx_type rmx_type;
4747 struct rect src = { 0 }; /* viewport in composition space */
4748 struct rect dst = { 0 }; /* stream addressable area */
4750 /* no mode, nothing to be done */
4754 /* Full screen scaling by default */
4755 src.width = mode->hdisplay;
4756 src.height = mode->vdisplay;
4757 dst.width = stream->timing.h_addressable;
4758 dst.height = stream->timing.v_addressable;
4761 rmx_type = dm_state->scaling;
4762 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
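/* Compare aspect ratios by cross-multiplying to avoid integer division. */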
4763 if (src.width * dst.height <
4764 src.height * dst.width) {
4765 /* height needs less upscaling/more downscaling */
4766 dst.width = src.width *
4767 dst.height / src.height;
4769 /* width needs less upscaling/more downscaling */
4770 dst.height = src.height *
4771 dst.width / src.width;
4773 } else if (rmx_type == RMX_CENTER) {
4777 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4778 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4780 if (dm_state->underscan_enable) {
4781 dst.x += dm_state->underscan_hborder / 2;
4782 dst.y += dm_state->underscan_vborder / 2;
4783 dst.width -= dm_state->underscan_hborder;
4784 dst.height -= dm_state->underscan_vborder;
4791 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4792 dst.x, dst.y, dst.width, dst.height);
4796 static enum dc_color_depth
4797 convert_color_depth_from_display_info(const struct drm_connector *connector,
4798 bool is_y420, int requested_bpc)
4805 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4806 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4808 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4810 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4813 bpc = (uint8_t)connector->display_info.bpc;
4814 /* Assume 8 bpc by default if no bpc is specified. */
4815 bpc = bpc ? bpc : 8;
4818 if (requested_bpc > 0) {
4820 * Cap display bpc based on the user requested value.
4822 * The value for state->max_bpc may not be correctly updated
4823 * depending on when the connector gets added to the state
4824 * or if this was called outside of atomic check, so it
4825 * can't be used directly.
4827 bpc = min_t(u8, bpc, requested_bpc);
4829 /* Round down to the nearest even number. */
4830 bpc = bpc - (bpc & 1);
4836 * Temporary workaround: DRM doesn't parse color depth for
4837 * EDID revisions before 1.4
4838 * TODO: Fix edid parsing
4840 return COLOR_DEPTH_888;
4842 return COLOR_DEPTH_666;
4844 return COLOR_DEPTH_888;
4846 return COLOR_DEPTH_101010;
4848 return COLOR_DEPTH_121212;
4850 return COLOR_DEPTH_141414;
4852 return COLOR_DEPTH_161616;
4854 return COLOR_DEPTH_UNDEFINED;
4858 static enum dc_aspect_ratio
4859 get_aspect_ratio(const struct drm_display_mode *mode_in)
4861 /* 1-1 mapping, since both enums follow the HDMI spec. */
4862 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4865 static enum dc_color_space
4866 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4868 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4870 switch (dc_crtc_timing->pixel_encoding) {
4871 case PIXEL_ENCODING_YCBCR422:
4872 case PIXEL_ENCODING_YCBCR444:
4873 case PIXEL_ENCODING_YCBCR420:
4876 * 27.03 MHz (pix_clk_100hz > 270300) is the separation point between
4877 * HDTV and SDTV per the HDMI spec: YCbCr709 above it, YCbCr601 below.
4880 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4881 if (dc_crtc_timing->flags.Y_ONLY)
4883 COLOR_SPACE_YCBCR709_LIMITED;
4885 color_space = COLOR_SPACE_YCBCR709;
4887 if (dc_crtc_timing->flags.Y_ONLY)
4889 COLOR_SPACE_YCBCR601_LIMITED;
4891 color_space = COLOR_SPACE_YCBCR601;
4896 case PIXEL_ENCODING_RGB:
4897 color_space = COLOR_SPACE_SRGB;
4908 static bool adjust_colour_depth_from_display_info(
4909 struct dc_crtc_timing *timing_out,
4910 const struct drm_display_info *info)
4912 enum dc_color_depth depth = timing_out->display_color_depth;
4915 normalized_clk = timing_out->pix_clk_100hz / 10;
4916 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4917 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4918 normalized_clk /= 2;
4919 /* Adjust the pixel clock per the HDMI spec based on colour depth. */
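/* e.g. 10 bpc scales the 8 bpc clock by 30/24, 12 bpc by 36/24, 16 bpc by 48/24. */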
4921 case COLOR_DEPTH_888:
4923 case COLOR_DEPTH_101010:
4924 normalized_clk = (normalized_clk * 30) / 24;
4926 case COLOR_DEPTH_121212:
4927 normalized_clk = (normalized_clk * 36) / 24;
4929 case COLOR_DEPTH_161616:
4930 normalized_clk = (normalized_clk * 48) / 24;
4933 /* The above depths are the only ones valid for HDMI. */
4936 if (normalized_clk <= info->max_tmds_clock) {
4937 timing_out->display_color_depth = depth;
4940 } while (--depth > COLOR_DEPTH_666);
4944 static void fill_stream_properties_from_drm_display_mode(
4945 struct dc_stream_state *stream,
4946 const struct drm_display_mode *mode_in,
4947 const struct drm_connector *connector,
4948 const struct drm_connector_state *connector_state,
4949 const struct dc_stream_state *old_stream,
4952 struct dc_crtc_timing *timing_out = &stream->timing;
4953 const struct drm_display_info *info = &connector->display_info;
4954 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4955 struct hdmi_vendor_infoframe hv_frame;
4956 struct hdmi_avi_infoframe avi_frame;
4958 memset(&hv_frame, 0, sizeof(hv_frame));
4959 memset(&avi_frame, 0, sizeof(avi_frame));
4961 timing_out->h_border_left = 0;
4962 timing_out->h_border_right = 0;
4963 timing_out->v_border_top = 0;
4964 timing_out->v_border_bottom = 0;
4965 /* TODO: un-hardcode */
4966 if (drm_mode_is_420_only(info, mode_in)
4967 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4968 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4969 else if (drm_mode_is_420_also(info, mode_in)
4970 && aconnector->force_yuv420_output)
4971 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4972 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4973 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4974 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4976 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4978 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4979 timing_out->display_color_depth = convert_color_depth_from_display_info(
4981 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4983 timing_out->scan_type = SCANNING_TYPE_NODATA;
4984 timing_out->hdmi_vic = 0;
4987 timing_out->vic = old_stream->timing.vic;
4988 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4989 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4991 timing_out->vic = drm_match_cea_mode(mode_in);
4992 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4993 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4994 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4995 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4998 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4999 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5000 timing_out->vic = avi_frame.video_code;
5001 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5002 timing_out->hdmi_vic = hv_frame.vic;
5005 timing_out->h_addressable = mode_in->crtc_hdisplay;
5006 timing_out->h_total = mode_in->crtc_htotal;
5007 timing_out->h_sync_width =
5008 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5009 timing_out->h_front_porch =
5010 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5011 timing_out->v_total = mode_in->crtc_vtotal;
5012 timing_out->v_addressable = mode_in->crtc_vdisplay;
5013 timing_out->v_front_porch =
5014 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5015 timing_out->v_sync_width =
5016 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
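/* crtc_clock is in kHz; DC uses units of 100 Hz. */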
5017 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5018 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5020 stream->output_color_space = get_output_color_space(timing_out);
5022 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5023 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5024 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5025 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5026 drm_mode_is_420_also(info, mode_in) &&
5027 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5028 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5029 adjust_colour_depth_from_display_info(timing_out, info);
5034 static void fill_audio_info(struct audio_info *audio_info,
5035 const struct drm_connector *drm_connector,
5036 const struct dc_sink *dc_sink)
5039 int cea_revision = 0;
5040 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5042 audio_info->manufacture_id = edid_caps->manufacturer_id;
5043 audio_info->product_id = edid_caps->product_id;
5045 cea_revision = drm_connector->display_info.cea_rev;
5047 strscpy(audio_info->display_name,
5048 edid_caps->display_name,
5049 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5051 if (cea_revision >= 3) {
5052 audio_info->mode_count = edid_caps->audio_mode_count;
5054 for (i = 0; i < audio_info->mode_count; ++i) {
5055 audio_info->modes[i].format_code =
5056 (enum audio_format_code)
5057 (edid_caps->audio_modes[i].format_code);
5058 audio_info->modes[i].channel_count =
5059 edid_caps->audio_modes[i].channel_count;
5060 audio_info->modes[i].sample_rates.all =
5061 edid_caps->audio_modes[i].sample_rate;
5062 audio_info->modes[i].sample_size =
5063 edid_caps->audio_modes[i].sample_size;
5067 audio_info->flags.all = edid_caps->speaker_flags;
5069 /* TODO: We only check the progressive mode; check the interlaced mode too. */
5070 if (drm_connector->latency_present[0]) {
5071 audio_info->video_latency = drm_connector->video_latency[0];
5072 audio_info->audio_latency = drm_connector->audio_latency[0];
5075 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5080 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5081 struct drm_display_mode *dst_mode)
5083 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5084 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5085 dst_mode->crtc_clock = src_mode->crtc_clock;
5086 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5087 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5088 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5089 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5090 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5091 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5092 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5093 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5094 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5095 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5096 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5100 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5101 const struct drm_display_mode *native_mode,
5104 if (scale_enabled) {
5105 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5106 } else if (native_mode->clock == drm_mode->clock &&
5107 native_mode->htotal == drm_mode->htotal &&
5108 native_mode->vtotal == drm_mode->vtotal) {
5109 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5111 /* no scaling and not an amdgpu-inserted mode, nothing to patch */
5115 static struct dc_sink *
5116 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5118 struct dc_sink_init_data sink_init_data = { 0 };
5119 struct dc_sink *sink = NULL;
5120 sink_init_data.link = aconnector->dc_link;
5121 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5123 sink = dc_sink_create(&sink_init_data);
5125 DRM_ERROR("Failed to create sink!\n");
5128 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5133 static void set_multisync_trigger_params(
5134 struct dc_stream_state *stream)
5136 if (stream->triggered_crtc_reset.enabled) {
5137 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5138 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5142 static void set_master_stream(struct dc_stream_state *stream_set[],
5145 int j, highest_rfr = 0, master_stream = 0;
5147 for (j = 0; j < stream_count; j++) {
5148 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5149 int refresh_rate = 0;
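/* refresh rate (Hz) = pixel clock / (h_total * v_total) */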
5151 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5152 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5153 if (refresh_rate > highest_rfr) {
5154 highest_rfr = refresh_rate;
5159 for (j = 0; j < stream_count; j++) {
5161 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5165 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5169 if (context->stream_count < 2)
5171 for (i = 0; i < context->stream_count ; i++) {
5172 if (!context->streams[i])
5175 * TODO: add a function to read AMD VSDB bits and set
5176 * crtc_sync_master.multi_sync_enabled flag
5177 * For now it's set to false
5179 set_multisync_trigger_params(context->streams[i]);
5181 set_master_stream(context->streams, context->stream_count);
5184 static struct dc_stream_state *
5185 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5186 const struct drm_display_mode *drm_mode,
5187 const struct dm_connector_state *dm_state,
5188 const struct dc_stream_state *old_stream,
5191 struct drm_display_mode *preferred_mode = NULL;
5192 struct drm_connector *drm_connector;
5193 const struct drm_connector_state *con_state =
5194 dm_state ? &dm_state->base : NULL;
5195 struct dc_stream_state *stream = NULL;
5196 struct drm_display_mode mode = *drm_mode;
5197 bool native_mode_found = false;
5198 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5200 int preferred_refresh = 0;
5201 #if defined(CONFIG_DRM_AMD_DC_DCN)
5202 struct dsc_dec_dpcd_caps dsc_caps;
5203 uint32_t link_bandwidth_kbps;
5205 struct dc_sink *sink = NULL;
5206 if (aconnector == NULL) {
5207 DRM_ERROR("aconnector is NULL!\n");
5211 drm_connector = &aconnector->base;
5213 if (!aconnector->dc_sink) {
5214 sink = create_fake_sink(aconnector);
5218 sink = aconnector->dc_sink;
5219 dc_sink_retain(sink);
5222 stream = dc_create_stream_for_sink(sink);
5224 if (stream == NULL) {
5225 DRM_ERROR("Failed to create stream for sink!\n");
5229 stream->dm_stream_context = aconnector;
5231 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5232 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5234 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5235 /* Search for preferred mode */
5236 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5237 native_mode_found = true;
5241 if (!native_mode_found)
5242 preferred_mode = list_first_entry_or_null(
5243 &aconnector->base.modes,
5244 struct drm_display_mode,
5247 mode_refresh = drm_mode_vrefresh(&mode);
5249 if (preferred_mode == NULL) {
5251 * This may not be an error: the use case is when there are no
5252 * usermode calls to reset and set the mode upon hotplug. In that
5253 * case we call set mode ourselves to restore the previous mode,
5254 * and the mode list may not have been filled in yet.
5256 DRM_DEBUG_DRIVER("No preferred mode found\n");
5258 decide_crtc_timing_for_drm_display_mode(
5259 &mode, preferred_mode,
5260 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5261 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5265 drm_mode_set_crtcinfo(&mode, 0);
5268 * If scaling is enabled and the refresh rate didn't change,
5269 * copy the VIC and polarities from the old timings.
5271 if (!scale || mode_refresh != preferred_refresh)
5272 fill_stream_properties_from_drm_display_mode(stream,
5273 &mode, &aconnector->base, con_state, NULL, requested_bpc);
5275 fill_stream_properties_from_drm_display_mode(stream,
5276 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5278 stream->timing.flags.DSC = 0;
5280 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5281 #if defined(CONFIG_DRM_AMD_DC_DCN)
5282 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5283 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5284 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5286 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5287 dc_link_get_link_cap(aconnector->dc_link));
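/*
 * dc_dsc_compute_config() decides whether DSC fits this timing within the
 * available link bandwidth and, if so, fills stream->timing.dsc_cfg; the
 * debugfs dsc_settings can force DSC on or off regardless.
 */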
5289 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5290 /* Set DSC policy according to dsc_clock_en */
5291 dc_dsc_policy_set_enable_dsc_when_not_needed(
5292 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5294 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5296 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5298 link_bandwidth_kbps,
5300 &stream->timing.dsc_cfg))
5301 stream->timing.flags.DSC = 1;
5302 /* Overwrite the stream flag if DSC is enabled through debugfs */
5303 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5304 stream->timing.flags.DSC = 1;
5306 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5307 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5309 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5310 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5312 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5313 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5318 update_stream_scaling_settings(&mode, dm_state, stream);
5321 &stream->audio_info,
5325 update_stream_signal(stream, sink);
5327 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5328 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5330 if (stream->link->psr_settings.psr_feature_enabled) {
5332 // Decide whether the stream supports VSC SDP colorimetry
5333 // before building the VSC info packet.
5335 stream->use_vsc_sdp_for_colorimetry = false;
5336 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5337 stream->use_vsc_sdp_for_colorimetry =
5338 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5340 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5341 stream->use_vsc_sdp_for_colorimetry = true;
5343 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5346 dc_sink_release(sink);
5351 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5353 drm_crtc_cleanup(crtc);
5357 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5358 struct drm_crtc_state *state)
5360 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5362 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5364 dc_stream_release(cur->stream);
5367 __drm_atomic_helper_crtc_destroy_state(state);
5373 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5375 struct dm_crtc_state *state;
5378 dm_crtc_destroy_state(crtc, crtc->state);
5380 state = kzalloc(sizeof(*state), GFP_KERNEL);
5381 if (WARN_ON(!state))
5384 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5387 static struct drm_crtc_state *
5388 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5390 struct dm_crtc_state *state, *cur;
5392 cur = to_dm_crtc_state(crtc->state);
5394 if (WARN_ON(!crtc->state))
5397 state = kzalloc(sizeof(*state), GFP_KERNEL);
5401 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5404 state->stream = cur->stream;
5405 dc_stream_retain(state->stream);
5408 state->active_planes = cur->active_planes;
5409 state->vrr_infopacket = cur->vrr_infopacket;
5410 state->abm_level = cur->abm_level;
5411 state->vrr_supported = cur->vrr_supported;
5412 state->freesync_config = cur->freesync_config;
5413 state->crc_src = cur->crc_src;
5414 state->cm_has_degamma = cur->cm_has_degamma;
5415 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5417 /* TODO: Duplicate dc_stream once the stream object is flattened */
5419 return &state->base;
5422 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5424 enum dc_irq_source irq_source;
5425 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5426 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5429 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5431 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5433 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5434 acrtc->crtc_id, enable ? "en" : "dis", rc);
5438 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5440 enum dc_irq_source irq_source;
5441 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5442 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5443 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5444 #if defined(CONFIG_DRM_AMD_DC_DCN)
5445 struct amdgpu_display_manager *dm = &adev->dm;
5446 unsigned long flags;
5451 /* vblank irq on -> Only need vupdate irq in vrr mode */
5452 if (amdgpu_dm_vrr_active(acrtc_state))
5453 rc = dm_set_vupdate_irq(crtc, true);
5455 /* vblank irq off -> vupdate irq off */
5456 rc = dm_set_vupdate_irq(crtc, false);
5462 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5464 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5467 if (amdgpu_in_reset(adev))
5470 #if defined(CONFIG_DRM_AMD_DC_DCN)
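/* Record the request and hand it to the deferred vblank worker (mall_work). */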
5471 spin_lock_irqsave(&dm->vblank_lock, flags);
5472 dm->vblank_workqueue->dm = dm;
5473 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5474 dm->vblank_workqueue->enable = enable;
5475 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5476 schedule_work(&dm->vblank_workqueue->mall_work);
5482 static int dm_enable_vblank(struct drm_crtc *crtc)
5484 return dm_set_vblank(crtc, true);
5487 static void dm_disable_vblank(struct drm_crtc *crtc)
5489 dm_set_vblank(crtc, false);
5492 /* Implement only the options currently available for the driver */
5493 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5494 .reset = dm_crtc_reset_state,
5495 .destroy = amdgpu_dm_crtc_destroy,
5496 .set_config = drm_atomic_helper_set_config,
5497 .page_flip = drm_atomic_helper_page_flip,
5498 .atomic_duplicate_state = dm_crtc_duplicate_state,
5499 .atomic_destroy_state = dm_crtc_destroy_state,
5500 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5501 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5502 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5503 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5504 .enable_vblank = dm_enable_vblank,
5505 .disable_vblank = dm_disable_vblank,
5506 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5509 static enum drm_connector_status
5510 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5513 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5517 * 1. This interface is NOT called in the context of the HPD irq.
5518 * 2. This interface *is* called in the context of a user-mode ioctl,
5519 * which makes it a bad place for *any* MST-related activity.
5522 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5523 !aconnector->fake_enable)
5524 connected = (aconnector->dc_sink != NULL);
5526 connected = (aconnector->base.force == DRM_FORCE_ON);
5528 update_subconnector_property(aconnector);
5530 return (connected ? connector_status_connected :
5531 connector_status_disconnected);
5534 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5535 struct drm_connector_state *connector_state,
5536 struct drm_property *property,
5539 struct drm_device *dev = connector->dev;
5540 struct amdgpu_device *adev = drm_to_adev(dev);
5541 struct dm_connector_state *dm_old_state =
5542 to_dm_connector_state(connector->state);
5543 struct dm_connector_state *dm_new_state =
5544 to_dm_connector_state(connector_state);
5548 if (property == dev->mode_config.scaling_mode_property) {
5549 enum amdgpu_rmx_type rmx_type;
5552 case DRM_MODE_SCALE_CENTER:
5553 rmx_type = RMX_CENTER;
5555 case DRM_MODE_SCALE_ASPECT:
5556 rmx_type = RMX_ASPECT;
5558 case DRM_MODE_SCALE_FULLSCREEN:
5559 rmx_type = RMX_FULL;
5561 case DRM_MODE_SCALE_NONE:
5567 if (dm_old_state->scaling == rmx_type)
5570 dm_new_state->scaling = rmx_type;
5572 } else if (property == adev->mode_info.underscan_hborder_property) {
5573 dm_new_state->underscan_hborder = val;
5575 } else if (property == adev->mode_info.underscan_vborder_property) {
5576 dm_new_state->underscan_vborder = val;
5578 } else if (property == adev->mode_info.underscan_property) {
5579 dm_new_state->underscan_enable = val;
5581 } else if (property == adev->mode_info.abm_level_property) {
5582 dm_new_state->abm_level = val;
5589 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5590 const struct drm_connector_state *state,
5591 struct drm_property *property,
5594 struct drm_device *dev = connector->dev;
5595 struct amdgpu_device *adev = drm_to_adev(dev);
5596 struct dm_connector_state *dm_state =
5597 to_dm_connector_state(state);
5600 if (property == dev->mode_config.scaling_mode_property) {
5601 switch (dm_state->scaling) {
5603 *val = DRM_MODE_SCALE_CENTER;
5606 *val = DRM_MODE_SCALE_ASPECT;
5609 *val = DRM_MODE_SCALE_FULLSCREEN;
5613 *val = DRM_MODE_SCALE_NONE;
5617 } else if (property == adev->mode_info.underscan_hborder_property) {
5618 *val = dm_state->underscan_hborder;
5620 } else if (property == adev->mode_info.underscan_vborder_property) {
5621 *val = dm_state->underscan_vborder;
5623 } else if (property == adev->mode_info.underscan_property) {
5624 *val = dm_state->underscan_enable;
5626 } else if (property == adev->mode_info.abm_level_property) {
5627 *val = dm_state->abm_level;
5634 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5636 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5638 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5641 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5643 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5644 const struct dc_link *link = aconnector->dc_link;
5645 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5646 struct amdgpu_display_manager *dm = &adev->dm;
5649 * Call only if mst_mgr was initialized earlier, since that is not done
5650 * for all connector types.
5652 if (aconnector->mst_mgr.dev)
5653 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5655 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5656 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5658 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5659 link->type != dc_connection_none &&
5660 dm->backlight_dev) {
5661 backlight_device_unregister(dm->backlight_dev);
5662 dm->backlight_dev = NULL;
5666 if (aconnector->dc_em_sink)
5667 dc_sink_release(aconnector->dc_em_sink);
5668 aconnector->dc_em_sink = NULL;
5669 if (aconnector->dc_sink)
5670 dc_sink_release(aconnector->dc_sink);
5671 aconnector->dc_sink = NULL;
5673 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5674 drm_connector_unregister(connector);
5675 drm_connector_cleanup(connector);
5676 if (aconnector->i2c) {
5677 i2c_del_adapter(&aconnector->i2c->base);
5678 kfree(aconnector->i2c);
5680 kfree(aconnector->dm_dp_aux.aux.name);
5685 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5687 struct dm_connector_state *state =
5688 to_dm_connector_state(connector->state);
5690 if (connector->state)
5691 __drm_atomic_helper_connector_destroy_state(connector->state);
5695 state = kzalloc(sizeof(*state), GFP_KERNEL);
5698 state->scaling = RMX_OFF;
5699 state->underscan_enable = false;
5700 state->underscan_hborder = 0;
5701 state->underscan_vborder = 0;
5702 state->base.max_requested_bpc = 8;
5703 state->vcpi_slots = 0;
5705 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5706 state->abm_level = amdgpu_dm_abm_level;
5708 __drm_atomic_helper_connector_reset(connector, &state->base);
5712 struct drm_connector_state *
5713 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5715 struct dm_connector_state *state =
5716 to_dm_connector_state(connector->state);
5718 struct dm_connector_state *new_state =
5719 kmemdup(state, sizeof(*state), GFP_KERNEL);
5724 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5726 new_state->freesync_capable = state->freesync_capable;
5727 new_state->abm_level = state->abm_level;
5728 new_state->scaling = state->scaling;
5729 new_state->underscan_enable = state->underscan_enable;
5730 new_state->underscan_hborder = state->underscan_hborder;
5731 new_state->underscan_vborder = state->underscan_vborder;
5732 new_state->vcpi_slots = state->vcpi_slots;
5733 new_state->pbn = state->pbn;
5734 return &new_state->base;
5738 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5740 struct amdgpu_dm_connector *amdgpu_dm_connector =
5741 to_amdgpu_dm_connector(connector);
5744 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5745 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5746 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5747 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5752 #if defined(CONFIG_DEBUG_FS)
5753 connector_debugfs_init(amdgpu_dm_connector);
5759 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5760 .reset = amdgpu_dm_connector_funcs_reset,
5761 .detect = amdgpu_dm_connector_detect,
5762 .fill_modes = drm_helper_probe_single_connector_modes,
5763 .destroy = amdgpu_dm_connector_destroy,
5764 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5765 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5766 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5767 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5768 .late_register = amdgpu_dm_connector_late_register,
5769 .early_unregister = amdgpu_dm_connector_unregister
5772 static int get_modes(struct drm_connector *connector)
5774 return amdgpu_dm_connector_get_modes(connector);
5777 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5779 struct dc_sink_init_data init_params = {
5780 .link = aconnector->dc_link,
5781 .sink_signal = SIGNAL_TYPE_VIRTUAL
5785 if (!aconnector->base.edid_blob_ptr) {
5786 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5787 aconnector->base.name);
5789 aconnector->base.force = DRM_FORCE_OFF;
5790 aconnector->base.override_edid = false;
5794 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5796 aconnector->edid = edid;
5798 aconnector->dc_em_sink = dc_link_add_remote_sink(
5799 aconnector->dc_link,
5801 (edid->extensions + 1) * EDID_LENGTH,
5804 if (aconnector->base.force == DRM_FORCE_ON) {
5805 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5806 aconnector->dc_link->local_sink :
5807 aconnector->dc_em_sink;
5808 dc_sink_retain(aconnector->dc_sink);
5812 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5814 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5817 * In case of a headless boot with force-on for a DP managed connector,
5818 * these settings have to be != 0 to get an initial modeset.
5820 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
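/* Report four lanes at LINK_RATE_HIGH2 (HBR2) so the fake sink has non-zero caps. */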
5821 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5822 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5826 aconnector->base.override_edid = true;
5827 create_eml_sink(aconnector);
5830 static struct dc_stream_state *
5831 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5832 const struct drm_display_mode *drm_mode,
5833 const struct dm_connector_state *dm_state,
5834 const struct dc_stream_state *old_stream)
5836 struct drm_connector *connector = &aconnector->base;
5837 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5838 struct dc_stream_state *stream;
5839 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5840 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5841 enum dc_status dc_result = DC_OK;
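/*
 * If DC rejects the stream, retry with progressively lower bpc
 * (down to 6) before giving up.
 */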
5844 stream = create_stream_for_sink(aconnector, drm_mode,
5845 dm_state, old_stream,
5847 if (stream == NULL) {
5848 DRM_ERROR("Failed to create stream for sink!\n");
5852 dc_result = dc_validate_stream(adev->dm.dc, stream);
5854 if (dc_result != DC_OK) {
5855 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5860 dc_status_to_str(dc_result));
5862 dc_stream_release(stream);
5864 requested_bpc -= 2; /* lower bpc to retry validation */
5867 } while (stream == NULL && requested_bpc >= 6);
5872 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5873 struct drm_display_mode *mode)
5875 int result = MODE_ERROR;
5876 struct dc_sink *dc_sink;
5877 /* TODO: Unhardcode stream count */
5878 struct dc_stream_state *stream;
5879 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5881 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5882 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5886 * Only run this the first time mode_valid is called, to initialize
5889 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5890 !aconnector->dc_em_sink)
5891 handle_edid_mgmt(aconnector);
5893 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5895 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5896 aconnector->base.force != DRM_FORCE_ON) {
5897 DRM_ERROR("dc_sink is NULL!\n");
5901 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5903 dc_stream_release(stream);
5908 /* TODO: error handling */
5912 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5913 struct dc_info_packet *out)
5915 struct hdmi_drm_infoframe frame;
5916 unsigned char buf[30]; /* 26 + 4 */
5920 memset(out, 0, sizeof(*out));
5922 if (!state->hdr_output_metadata)
5925 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5929 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5933 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5937 /* Prepare the infopacket for DC. */
5938 switch (state->connector->connector_type) {
5939 case DRM_MODE_CONNECTOR_HDMIA:
5940 out->hb0 = 0x87; /* type */
5941 out->hb1 = 0x01; /* version */
5942 out->hb2 = 0x1A; /* length */
5943 out->sb[0] = buf[3]; /* checksum */
5947 case DRM_MODE_CONNECTOR_DisplayPort:
5948 case DRM_MODE_CONNECTOR_eDP:
5949 out->hb0 = 0x00; /* sdp id, zero */
5950 out->hb1 = 0x87; /* type */
5951 out->hb2 = 0x1D; /* payload len - 1 */
5952 out->hb3 = (0x13 << 2); /* sdp version */
5953 out->sb[0] = 0x01; /* version */
5954 out->sb[1] = 0x1A; /* length */
5962 memcpy(&out->sb[i], &buf[4], 26);
5965 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5966 sizeof(out->sb), false);
5972 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5973 const struct drm_connector_state *new_state)
5975 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5976 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5978 if (old_blob != new_blob) {
5979 if (old_blob && new_blob &&
5980 old_blob->length == new_blob->length)
5981 return memcmp(old_blob->data, new_blob->data,
5991 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5992 struct drm_atomic_state *state)
5994 struct drm_connector_state *new_con_state =
5995 drm_atomic_get_new_connector_state(state, conn);
5996 struct drm_connector_state *old_con_state =
5997 drm_atomic_get_old_connector_state(state, conn);
5998 struct drm_crtc *crtc = new_con_state->crtc;
5999 struct drm_crtc_state *new_crtc_state;
6002 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6007 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6008 struct dc_info_packet hdr_infopacket;
6010 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6014 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6015 if (IS_ERR(new_crtc_state))
6016 return PTR_ERR(new_crtc_state);
6019 * DC considers the stream backends changed if the
6020 * static metadata changes. Forcing the modeset also
6021 * gives a simple way for userspace to switch from
6022 * 8bpc to 10bpc when setting the metadata to enter
6025 * Changing the static metadata after it's been
6026 * set is permissible, however. So only force a
6027 * modeset if we're entering or exiting HDR.
6029 new_crtc_state->mode_changed =
6030 !old_con_state->hdr_output_metadata ||
6031 !new_con_state->hdr_output_metadata;
6037 static const struct drm_connector_helper_funcs
6038 amdgpu_dm_connector_helper_funcs = {
6040 * If hotplugging a second, bigger display in FB console mode, the bigger
6041 * resolution modes will be filtered out by drm_mode_validate_size() and
6042 * will be missing after the user starts lightdm. So we need to renew the
6043 * mode list in the get_modes callback, not just return the mode count.
6045 .get_modes = get_modes,
6046 .mode_valid = amdgpu_dm_connector_mode_valid,
6047 .atomic_check = amdgpu_dm_connector_atomic_check,
6050 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6054 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6056 struct drm_atomic_state *state = new_crtc_state->state;
6057 struct drm_plane *plane;
6060 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6061 struct drm_plane_state *new_plane_state;
6063 /* Cursor planes are "fake". */
6064 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6067 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6069 if (!new_plane_state) {
6071 * The plane is enabled on the CRTC and hasn't changed
6072 * state. This means that it previously passed
6073 * validation and is therefore enabled.
6079 /* We need a framebuffer to be considered enabled. */
6080 num_active += (new_plane_state->fb != NULL);
6086 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6087 struct drm_crtc_state *new_crtc_state)
6089 struct dm_crtc_state *dm_new_crtc_state =
6090 to_dm_crtc_state(new_crtc_state);
6092 dm_new_crtc_state->active_planes = 0;
6094 if (!dm_new_crtc_state->stream)
6097 dm_new_crtc_state->active_planes =
6098 count_crtc_active_planes(new_crtc_state);
6101 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6102 struct drm_atomic_state *state)
6104 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6106 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6107 struct dc *dc = adev->dm.dc;
6108 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6111 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6113 dm_update_crtc_active_planes(crtc, crtc_state);
6115 if (unlikely(!dm_crtc_state->stream &&
6116 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6122 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6123 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6124 * planes are disabled, which is not supported by the hardware. And there is legacy
6125 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6127 if (crtc_state->enable &&
6128 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6129 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6133 /* In some use cases, like reset, no stream is attached */
6134 if (!dm_crtc_state->stream)
6137 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6140 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6144 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6145 const struct drm_display_mode *mode,
6146 struct drm_display_mode *adjusted_mode)
6151 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6152 .disable = dm_crtc_helper_disable,
6153 .atomic_check = dm_crtc_helper_atomic_check,
6154 .mode_fixup = dm_crtc_helper_mode_fixup,
6155 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6158 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6163 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6165 switch (display_color_depth) {
6166 case COLOR_DEPTH_666:
6168 case COLOR_DEPTH_888:
6170 case COLOR_DEPTH_101010:
6172 case COLOR_DEPTH_121212:
6174 case COLOR_DEPTH_141414:
6176 case COLOR_DEPTH_161616:
6184 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6185 struct drm_crtc_state *crtc_state,
6186 struct drm_connector_state *conn_state)
6188 struct drm_atomic_state *state = crtc_state->state;
6189 struct drm_connector *connector = conn_state->connector;
6190 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6191 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6192 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6193 struct drm_dp_mst_topology_mgr *mst_mgr;
6194 struct drm_dp_mst_port *mst_port;
6195 enum dc_color_depth color_depth;
6197 bool is_y420 = false;
6199 if (!aconnector->port || !aconnector->dc_sink)
6202 mst_port = aconnector->port;
6203 mst_mgr = &aconnector->mst_port->mst_mgr;
6205 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6208 if (!state->duplicated) {
6209 int max_bpc = conn_state->max_requested_bpc;
6210 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6211 aconnector->force_yuv420_output;
6212 color_depth = convert_color_depth_from_display_info(connector,
6215 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6216 clock = adjusted_mode->clock;
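/*
 * PBN (payload bandwidth number) expresses the stream's bandwidth in
 * MST link units; the VCPI slot count computed below is the number of
 * 1/64th link-bandwidth time slots the payload occupies on the link.
 */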
6217 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6219 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6222 dm_new_connector_state->pbn,
6223 dm_mst_get_pbn_divider(aconnector->dc_link));
6224 if (dm_new_connector_state->vcpi_slots < 0) {
6225 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6226 return dm_new_connector_state->vcpi_slots;
6231 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6232 .disable = dm_encoder_helper_disable,
6233 .atomic_check = dm_encoder_helper_atomic_check
6236 #if defined(CONFIG_DRM_AMD_DC_DCN)
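/*
 * With DSC enabled on an MST stream the payload shrinks, so recompute
 * PBN and the VCPI slot allocation from the DSC bits-per-pixel rather
 * than from the uncompressed stream bandwidth.
 */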
6237 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6238 struct dc_state *dc_state)
6240 struct dc_stream_state *stream = NULL;
6241 struct drm_connector *connector;
6242 struct drm_connector_state *new_con_state, *old_con_state;
6243 struct amdgpu_dm_connector *aconnector;
6244 struct dm_connector_state *dm_conn_state;
6245 int i, j, clock, bpp;
6246 int vcpi, pbn_div, pbn = 0;
6248 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6250 aconnector = to_amdgpu_dm_connector(connector);
6252 if (!aconnector->port)
6255 if (!new_con_state || !new_con_state->crtc)
6258 dm_conn_state = to_dm_connector_state(new_con_state);
6260 for (j = 0; j < dc_state->stream_count; j++) {
6261 stream = dc_state->streams[j];
6265 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6274 if (stream->timing.flags.DSC != 1) {
6275 drm_dp_mst_atomic_enable_dsc(state,
6283 pbn_div = dm_mst_get_pbn_divider(stream->link);
6284 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6285 clock = stream->timing.pix_clk_100hz / 10;
6286 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6287 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6294 dm_conn_state->pbn = pbn;
6295 dm_conn_state->vcpi_slots = vcpi;
6301 static void dm_drm_plane_reset(struct drm_plane *plane)
6303 struct dm_plane_state *amdgpu_state = NULL;
6306 plane->funcs->atomic_destroy_state(plane, plane->state);
6308 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6309 WARN_ON(amdgpu_state == NULL);
6312 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
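/*
 * Duplicating a plane state shares the underlying dc_plane_state by
 * taking an extra reference; the matching release happens in
 * dm_drm_plane_destroy_state().
 */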
6315 static struct drm_plane_state *
6316 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6318 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6320 old_dm_plane_state = to_dm_plane_state(plane->state);
6321 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6322 if (!dm_plane_state)
6325 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6327 if (old_dm_plane_state->dc_state) {
6328 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6329 dc_plane_state_retain(dm_plane_state->dc_state);
6332 return &dm_plane_state->base;
6335 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6336 struct drm_plane_state *state)
6338 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6340 if (dm_plane_state->dc_state)
6341 dc_plane_state_release(dm_plane_state->dc_state);
6343 drm_atomic_helper_plane_destroy_state(plane, state);
6346 static const struct drm_plane_funcs dm_plane_funcs = {
6347 .update_plane = drm_atomic_helper_update_plane,
6348 .disable_plane = drm_atomic_helper_disable_plane,
6349 .destroy = drm_primary_helper_destroy,
6350 .reset = dm_drm_plane_reset,
6351 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6352 .atomic_destroy_state = dm_drm_plane_destroy_state,
6353 .format_mod_supported = dm_plane_format_mod_supported,
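/*
 * prepare_fb pins the framebuffer BO in a scanout-capable domain and
 * maps it through GART so a stable GPU address is available before
 * the commit programs DC.
 */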
6356 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6357 struct drm_plane_state *new_state)
6359 struct amdgpu_framebuffer *afb;
6360 struct drm_gem_object *obj;
6361 struct amdgpu_device *adev;
6362 struct amdgpu_bo *rbo;
6363 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6364 struct list_head list;
6365 struct ttm_validate_buffer tv;
6366 struct ww_acquire_ctx ticket;
6370 if (!new_state->fb) {
6371 DRM_DEBUG_DRIVER("No FB bound\n");
6375 afb = to_amdgpu_framebuffer(new_state->fb);
6376 obj = new_state->fb->obj[0];
6377 rbo = gem_to_amdgpu_bo(obj);
6378 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6379 INIT_LIST_HEAD(&list);
6383 list_add(&tv.head, &list);
6385 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6387 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6391 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6392 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6394 domain = AMDGPU_GEM_DOMAIN_VRAM;
6396 r = amdgpu_bo_pin(rbo, domain);
6397 if (unlikely(r != 0)) {
6398 if (r != -ERESTARTSYS)
6399 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6400 ttm_eu_backoff_reservation(&ticket, &list);
6404 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6405 if (unlikely(r != 0)) {
6406 amdgpu_bo_unpin(rbo);
6407 ttm_eu_backoff_reservation(&ticket, &list);
6408 DRM_ERROR("%p bind failed\n", rbo);
6412 ttm_eu_backoff_reservation(&ticket, &list);
6414 afb->address = amdgpu_bo_gpu_offset(rbo);
6419 * We don't do surface updates on planes that have been newly created,
6420 * but we also don't have the afb->address during atomic check.
6422 * Fill in buffer attributes depending on the address here, but only on
6423 * newly created planes since they're not being used by DC yet and this
6424 * won't modify global state.
6426 dm_plane_state_old = to_dm_plane_state(plane->state);
6427 dm_plane_state_new = to_dm_plane_state(new_state);
6429 if (dm_plane_state_new->dc_state &&
6430 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6431 struct dc_plane_state *plane_state =
6432 dm_plane_state_new->dc_state;
6433 bool force_disable_dcc = !plane_state->dcc.enable;
6435 fill_plane_buffer_attributes(
6436 adev, afb, plane_state->format, plane_state->rotation,
6438 &plane_state->tiling_info, &plane_state->plane_size,
6439 &plane_state->dcc, &plane_state->address,
6440 afb->tmz_surface, force_disable_dcc);
6446 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6447 struct drm_plane_state *old_state)
6449 struct amdgpu_bo *rbo;
6455 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6456 r = amdgpu_bo_reserve(rbo, false);
6458 DRM_ERROR("failed to reserve rbo before unpin\n");
6462 amdgpu_bo_unpin(rbo);
6463 amdgpu_bo_unreserve(rbo);
6464 amdgpu_bo_unref(&rbo);
6467 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6468 struct drm_crtc_state *new_crtc_state)
6470 struct drm_framebuffer *fb = state->fb;
6471 int min_downscale, max_upscale;
6473 int max_scale = INT_MAX;
6475 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6476 if (fb && state->crtc) {
6477 /* Validate viewport to cover the case when only the position changes */
6478 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6479 int viewport_width = state->crtc_w;
6480 int viewport_height = state->crtc_h;
6482 if (state->crtc_x < 0)
6483 viewport_width += state->crtc_x;
6484 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6485 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6487 if (state->crtc_y < 0)
6488 viewport_height += state->crtc_y;
6489 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6490 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6492 /* If the plane is completely outside of the screen, viewport_width and/or
6493 * viewport_height will be negative, which still satisfies the condition
6494 * below, so those cases are covered as well.
6495 * The x2 on the width is because of pipe-split.
6497 if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
6501 /* Get min/max allowed scaling factors from plane caps. */
6502 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6503 &min_downscale, &max_upscale);
6505 * Convert to drm convention: 16.16 fixed point, instead of dc's
6506 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6507 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6509 min_scale = (1000 << 16) / max_upscale;
6510 max_scale = (1000 << 16) / min_downscale;
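/*
 * For example, DC caps of max_upscale = 16000 (16x) and
 * min_downscale = 250 (1/4x) would give min_scale = 0x1000
 * (1/16 in 16.16 fixed point) and max_scale = 0x40000 (4.0).
 */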
6513 return drm_atomic_helper_check_plane_state(
6514 state, new_crtc_state, min_scale, max_scale, true, true);
6517 static int dm_plane_atomic_check(struct drm_plane *plane,
6518 struct drm_plane_state *state)
6520 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6521 struct dc *dc = adev->dm.dc;
6522 struct dm_plane_state *dm_plane_state;
6523 struct dc_scaling_info scaling_info;
6524 struct drm_crtc_state *new_crtc_state;
6527 trace_amdgpu_dm_plane_atomic_check(state);
6529 dm_plane_state = to_dm_plane_state(state);
6531 if (!dm_plane_state->dc_state)
6535 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6536 if (!new_crtc_state)
6539 ret = dm_plane_helper_check_state(state, new_crtc_state);
6543 ret = fill_dc_scaling_info(state, &scaling_info);
6547 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6553 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6554 struct drm_plane_state *new_plane_state)
6556 /* Only support async updates on cursor planes. */
6557 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6563 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6564 struct drm_plane_state *new_state)
6566 struct drm_plane_state *old_state =
6567 drm_atomic_get_old_plane_state(new_state->state, plane);
6569 trace_amdgpu_dm_atomic_update_cursor(new_state);
6571 swap(plane->state->fb, new_state->fb);
6573 plane->state->src_x = new_state->src_x;
6574 plane->state->src_y = new_state->src_y;
6575 plane->state->src_w = new_state->src_w;
6576 plane->state->src_h = new_state->src_h;
6577 plane->state->crtc_x = new_state->crtc_x;
6578 plane->state->crtc_y = new_state->crtc_y;
6579 plane->state->crtc_w = new_state->crtc_w;
6580 plane->state->crtc_h = new_state->crtc_h;
6582 handle_cursor_update(plane, old_state);
6585 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6586 .prepare_fb = dm_plane_helper_prepare_fb,
6587 .cleanup_fb = dm_plane_helper_cleanup_fb,
6588 .atomic_check = dm_plane_atomic_check,
6589 .atomic_async_check = dm_plane_atomic_async_check,
6590 .atomic_async_update = dm_plane_atomic_async_update
6594 * TODO: these are currently initialized to RGB formats only.
6595 * For future use cases we should either initialize them dynamically based on
6596 * plane capabilities, or initialize this array to all formats, so the internal
6597 * drm check will succeed, and let DC implement the proper check.
6599 static const uint32_t rgb_formats[] = {
6600 DRM_FORMAT_XRGB8888,
6601 DRM_FORMAT_ARGB8888,
6602 DRM_FORMAT_RGBA8888,
6603 DRM_FORMAT_XRGB2101010,
6604 DRM_FORMAT_XBGR2101010,
6605 DRM_FORMAT_ARGB2101010,
6606 DRM_FORMAT_ABGR2101010,
6607 DRM_FORMAT_XBGR8888,
6608 DRM_FORMAT_ABGR8888,
6612 static const uint32_t overlay_formats[] = {
6613 DRM_FORMAT_XRGB8888,
6614 DRM_FORMAT_ARGB8888,
6615 DRM_FORMAT_RGBA8888,
6616 DRM_FORMAT_XBGR8888,
6617 DRM_FORMAT_ABGR8888,
6621 static const u32 cursor_formats[] = {
6625 static int get_plane_formats(const struct drm_plane *plane,
6626 const struct dc_plane_cap *plane_cap,
6627 uint32_t *formats, int max_formats)
6629 int i, num_formats = 0;
6632 * TODO: Query support for each group of formats directly from
6633 * DC plane caps. This will require adding more formats to the caps list.
6637 switch (plane->type) {
6638 case DRM_PLANE_TYPE_PRIMARY:
6639 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6640 if (num_formats >= max_formats)
6643 formats[num_formats++] = rgb_formats[i];
6646 if (plane_cap && plane_cap->pixel_format_support.nv12)
6647 formats[num_formats++] = DRM_FORMAT_NV12;
6648 if (plane_cap && plane_cap->pixel_format_support.p010)
6649 formats[num_formats++] = DRM_FORMAT_P010;
6650 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6651 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6652 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6653 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6654 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6658 case DRM_PLANE_TYPE_OVERLAY:
6659 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6660 if (num_formats >= max_formats)
6663 formats[num_formats++] = overlay_formats[i];
6667 case DRM_PLANE_TYPE_CURSOR:
6668 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6669 if (num_formats >= max_formats)
6672 formats[num_formats++] = cursor_formats[i];
6680 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6681 struct drm_plane *plane,
6682 unsigned long possible_crtcs,
6683 const struct dc_plane_cap *plane_cap)
6685 uint32_t formats[32];
6688 unsigned int supported_rotations;
6689 uint64_t *modifiers = NULL;
6691 num_formats = get_plane_formats(plane, plane_cap, formats,
6692 ARRAY_SIZE(formats));
6694 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6698 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6699 &dm_plane_funcs, formats, num_formats,
6700 modifiers, plane->type, NULL);
6705 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6706 plane_cap && plane_cap->per_pixel_alpha) {
6707 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6708 BIT(DRM_MODE_BLEND_PREMULTI);
6710 drm_plane_create_alpha_property(plane);
6711 drm_plane_create_blend_mode_property(plane, blend_caps);
6714 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6716 (plane_cap->pixel_format_support.nv12 ||
6717 plane_cap->pixel_format_support.p010)) {
6718 /* This only affects YUV formats. */
6719 drm_plane_create_color_properties(
6721 BIT(DRM_COLOR_YCBCR_BT601) |
6722 BIT(DRM_COLOR_YCBCR_BT709) |
6723 BIT(DRM_COLOR_YCBCR_BT2020),
6724 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6725 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6726 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6729 supported_rotations =
6730 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6731 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6733 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6734 plane->type != DRM_PLANE_TYPE_CURSOR)
6735 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6736 supported_rotations);
6738 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6740 /* Create (reset) the plane state */
6741 if (plane->funcs->reset)
6742 plane->funcs->reset(plane);
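/*
 * Each CRTC gets its own dedicated cursor plane in addition to the
 * primary plane handed in by the caller; cursor updates are then
 * programmed through DC's cursor interface rather than as regular
 * surface updates.
 */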
6747 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6748 struct drm_plane *plane,
6749 uint32_t crtc_index)
6751 struct amdgpu_crtc *acrtc = NULL;
6752 struct drm_plane *cursor_plane;
6756 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6760 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6761 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6763 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6767 res = drm_crtc_init_with_planes(
6772 &amdgpu_dm_crtc_funcs, NULL);
6777 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6779 /* Create (reset) the CRTC state */
6780 if (acrtc->base.funcs->reset)
6781 acrtc->base.funcs->reset(&acrtc->base);
6783 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6784 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6786 acrtc->crtc_id = crtc_index;
6787 acrtc->base.enabled = false;
6788 acrtc->otg_inst = -1;
6790 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6791 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6792 true, MAX_COLOR_LUT_ENTRIES);
6793 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6799 kfree(cursor_plane);
6804 static int to_drm_connector_type(enum signal_type st)
6807 case SIGNAL_TYPE_HDMI_TYPE_A:
6808 return DRM_MODE_CONNECTOR_HDMIA;
6809 case SIGNAL_TYPE_EDP:
6810 return DRM_MODE_CONNECTOR_eDP;
6811 case SIGNAL_TYPE_LVDS:
6812 return DRM_MODE_CONNECTOR_LVDS;
6813 case SIGNAL_TYPE_RGB:
6814 return DRM_MODE_CONNECTOR_VGA;
6815 case SIGNAL_TYPE_DISPLAY_PORT:
6816 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6817 return DRM_MODE_CONNECTOR_DisplayPort;
6818 case SIGNAL_TYPE_DVI_DUAL_LINK:
6819 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6820 return DRM_MODE_CONNECTOR_DVID;
6821 case SIGNAL_TYPE_VIRTUAL:
6822 return DRM_MODE_CONNECTOR_VIRTUAL;
6825 return DRM_MODE_CONNECTOR_Unknown;
6829 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6831 struct drm_encoder *encoder;
6833 /* There is only one encoder per connector */
6834 drm_connector_for_each_possible_encoder(connector, encoder)
6840 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6842 struct drm_encoder *encoder;
6843 struct amdgpu_encoder *amdgpu_encoder;
6845 encoder = amdgpu_dm_connector_to_encoder(connector);
6847 if (encoder == NULL)
6850 amdgpu_encoder = to_amdgpu_encoder(encoder);
6852 amdgpu_encoder->native_mode.clock = 0;
6854 if (!list_empty(&connector->probed_modes)) {
6855 struct drm_display_mode *preferred_mode = NULL;
6857 list_for_each_entry(preferred_mode,
6858 &connector->probed_modes,
6860 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6861 amdgpu_encoder->native_mode = *preferred_mode;
6869 static struct drm_display_mode *
6870 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6872 int hdisplay, int vdisplay)
6874 struct drm_device *dev = encoder->dev;
6875 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6876 struct drm_display_mode *mode = NULL;
6877 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6879 mode = drm_mode_duplicate(dev, native_mode);
6884 mode->hdisplay = hdisplay;
6885 mode->vdisplay = vdisplay;
6886 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6887 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
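/*
 * Add a set of common resolutions below the panel's native mode so
 * userspace can pick a smaller mode even when the EDID only exposes
 * the native timing; duplicates and modes larger than the native mode
 * are skipped in the loop below.
 */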
6893 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6894 struct drm_connector *connector)
6896 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6897 struct drm_display_mode *mode = NULL;
6898 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6899 struct amdgpu_dm_connector *amdgpu_dm_connector =
6900 to_amdgpu_dm_connector(connector);
6904 char name[DRM_DISPLAY_MODE_LEN];
6907 } common_modes[] = {
6908 { "640x480", 640, 480},
6909 { "800x600", 800, 600},
6910 { "1024x768", 1024, 768},
6911 { "1280x720", 1280, 720},
6912 { "1280x800", 1280, 800},
6913 {"1280x1024", 1280, 1024},
6914 { "1440x900", 1440, 900},
6915 {"1680x1050", 1680, 1050},
6916 {"1600x1200", 1600, 1200},
6917 {"1920x1080", 1920, 1080},
6918 {"1920x1200", 1920, 1200}
6921 n = ARRAY_SIZE(common_modes);
6923 for (i = 0; i < n; i++) {
6924 struct drm_display_mode *curmode = NULL;
6925 bool mode_existed = false;
6927 if (common_modes[i].w > native_mode->hdisplay ||
6928 common_modes[i].h > native_mode->vdisplay ||
6929 (common_modes[i].w == native_mode->hdisplay &&
6930 common_modes[i].h == native_mode->vdisplay))
6933 list_for_each_entry(curmode, &connector->probed_modes, head) {
6934 if (common_modes[i].w == curmode->hdisplay &&
6935 common_modes[i].h == curmode->vdisplay) {
6936 mode_existed = true;
6944 mode = amdgpu_dm_create_common_mode(encoder,
6945 common_modes[i].name, common_modes[i].w,
6947 drm_mode_probed_add(connector, mode);
6948 amdgpu_dm_connector->num_modes++;
6952 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6955 struct amdgpu_dm_connector *amdgpu_dm_connector =
6956 to_amdgpu_dm_connector(connector);
6959 /* empty probed_modes */
6960 INIT_LIST_HEAD(&connector->probed_modes);
6961 amdgpu_dm_connector->num_modes =
6962 drm_add_edid_modes(connector, edid);
6964 /* Sort the probed modes before calling
6965 * amdgpu_dm_get_native_mode(), since an EDID can contain
6966 * more than one preferred mode. Modes later in the probed
6967 * mode list may have a higher preferred resolution; for
6968 * example, 3840x2160 in the base EDID preferred timing and
6969 * 4096x2160 as the preferred resolution in a later DID
6970 * extension block.
6972 drm_mode_sort(&connector->probed_modes);
6973 amdgpu_dm_get_native_mode(connector);
6975 amdgpu_dm_connector->num_modes = 0;
6979 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6981 struct amdgpu_dm_connector *amdgpu_dm_connector =
6982 to_amdgpu_dm_connector(connector);
6983 struct drm_encoder *encoder;
6984 struct edid *edid = amdgpu_dm_connector->edid;
6986 encoder = amdgpu_dm_connector_to_encoder(connector);
6988 if (!drm_edid_is_valid(edid)) {
6989 amdgpu_dm_connector->num_modes =
6990 drm_add_modes_noedid(connector, 640, 480);
6992 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6993 amdgpu_dm_connector_add_common_modes(encoder, connector);
6995 amdgpu_dm_fbc_init(connector);
6997 return amdgpu_dm_connector->num_modes;
7000 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7001 struct amdgpu_dm_connector *aconnector,
7003 struct dc_link *link,
7006 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7009 * Some of the properties below require access to state, like bpc.
7010 * Allocate some default initial connector state with our reset helper.
7012 if (aconnector->base.funcs->reset)
7013 aconnector->base.funcs->reset(&aconnector->base);
7015 aconnector->connector_id = link_index;
7016 aconnector->dc_link = link;
7017 aconnector->base.interlace_allowed = false;
7018 aconnector->base.doublescan_allowed = false;
7019 aconnector->base.stereo_allowed = false;
7020 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7021 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7022 aconnector->audio_inst = -1;
7023 mutex_init(&aconnector->hpd_lock);
7026 * Configure HPD hot-plug support: connector->polled defaults to 0,
7027 * which means HPD hot plug is not supported.
7029 switch (connector_type) {
7030 case DRM_MODE_CONNECTOR_HDMIA:
7031 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7032 aconnector->base.ycbcr_420_allowed =
7033 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7035 case DRM_MODE_CONNECTOR_DisplayPort:
7036 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7037 aconnector->base.ycbcr_420_allowed =
7038 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7040 case DRM_MODE_CONNECTOR_DVID:
7041 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7047 drm_object_attach_property(&aconnector->base.base,
7048 dm->ddev->mode_config.scaling_mode_property,
7049 DRM_MODE_SCALE_NONE);
7051 drm_object_attach_property(&aconnector->base.base,
7052 adev->mode_info.underscan_property,
7054 drm_object_attach_property(&aconnector->base.base,
7055 adev->mode_info.underscan_hborder_property,
7057 drm_object_attach_property(&aconnector->base.base,
7058 adev->mode_info.underscan_vborder_property,
7061 if (!aconnector->mst_port)
7062 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7064 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7065 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7066 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7068 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7069 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7070 drm_object_attach_property(&aconnector->base.base,
7071 adev->mode_info.abm_level_property, 0);
7074 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7075 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7076 connector_type == DRM_MODE_CONNECTOR_eDP) {
7077 drm_object_attach_property(
7078 &aconnector->base.base,
7079 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7081 if (!aconnector->mst_port)
7082 drm_connector_attach_vrr_capable_property(&aconnector->base);
7084 #ifdef CONFIG_DRM_AMD_DC_HDCP
7085 if (adev->dm.hdcp_workqueue)
7086 drm_connector_attach_content_protection_property(&aconnector->base, true);
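/*
 * i2c transfers from clients of this adapter are repackaged as DC
 * i2c_command payloads and submitted over the link's DDC channel.
 */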
7091 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7092 struct i2c_msg *msgs, int num)
7094 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7095 struct ddc_service *ddc_service = i2c->ddc_service;
7096 struct i2c_command cmd;
7100 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7105 cmd.number_of_payloads = num;
7106 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7109 for (i = 0; i < num; i++) {
7110 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7111 cmd.payloads[i].address = msgs[i].addr;
7112 cmd.payloads[i].length = msgs[i].len;
7113 cmd.payloads[i].data = msgs[i].buf;
7117 ddc_service->ctx->dc,
7118 ddc_service->ddc_pin->hw_info.ddc_channel,
7122 kfree(cmd.payloads);
7126 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7128 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7131 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7132 .master_xfer = amdgpu_dm_i2c_xfer,
7133 .functionality = amdgpu_dm_i2c_func,
7136 static struct amdgpu_i2c_adapter *
7137 create_i2c(struct ddc_service *ddc_service,
7141 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7142 struct amdgpu_i2c_adapter *i2c;
7144 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7147 i2c->base.owner = THIS_MODULE;
7148 i2c->base.class = I2C_CLASS_DDC;
7149 i2c->base.dev.parent = &adev->pdev->dev;
7150 i2c->base.algo = &amdgpu_dm_i2c_algo;
7151 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7152 i2c_set_adapdata(&i2c->base, i2c);
7153 i2c->ddc_service = ddc_service;
7154 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7161 * Note: this function assumes that dc_link_detect() was called for the
7162 * dc_link which will be represented by this aconnector.
7164 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7165 struct amdgpu_dm_connector *aconnector,
7166 uint32_t link_index,
7167 struct amdgpu_encoder *aencoder)
7171 struct dc *dc = dm->dc;
7172 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7173 struct amdgpu_i2c_adapter *i2c;
7175 link->priv = aconnector;
7177 DRM_DEBUG_DRIVER("%s()\n", __func__);
7179 i2c = create_i2c(link->ddc, link->link_index, &res);
7181 DRM_ERROR("Failed to create i2c adapter data\n");
7185 aconnector->i2c = i2c;
7186 res = i2c_add_adapter(&i2c->base);
7189 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7193 connector_type = to_drm_connector_type(link->connector_signal);
7195 res = drm_connector_init_with_ddc(
7198 &amdgpu_dm_connector_funcs,
7203 DRM_ERROR("connector_init failed\n");
7204 aconnector->connector_id = -1;
7208 drm_connector_helper_add(
7210 &amdgpu_dm_connector_helper_funcs);
7212 amdgpu_dm_connector_init_helper(
7219 drm_connector_attach_encoder(
7220 &aconnector->base, &aencoder->base);
7222 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7223 || connector_type == DRM_MODE_CONNECTOR_eDP)
7224 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7229 aconnector->i2c = NULL;
7234 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7236 switch (adev->mode_info.num_crtc) {
7253 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7254 struct amdgpu_encoder *aencoder,
7255 uint32_t link_index)
7257 struct amdgpu_device *adev = drm_to_adev(dev);
7259 int res = drm_encoder_init(dev,
7261 &amdgpu_dm_encoder_funcs,
7262 DRM_MODE_ENCODER_TMDS,
7265 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7268 aencoder->encoder_id = link_index;
7270 aencoder->encoder_id = -1;
7272 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
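/*
 * Enable or disable the CRTC's pageflip interrupt and DRM vblank
 * bookkeeping together whenever the CRTC is switched on or off.
 */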
7277 static void manage_dm_interrupts(struct amdgpu_device *adev,
7278 struct amdgpu_crtc *acrtc,
7282 * We have no guarantee that the frontend index maps to the same
7283 * backend index - some even map to more than one.
7285 * TODO: Use a different interrupt or check DC itself for the mapping.
7288 amdgpu_display_crtc_idx_to_irq_type(
7293 drm_crtc_vblank_on(&acrtc->base);
7296 &adev->pageflip_irq,
7302 &adev->pageflip_irq,
7304 drm_crtc_vblank_off(&acrtc->base);
7308 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7309 struct amdgpu_crtc *acrtc)
7312 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7315 * This reads the current state for the IRQ and forcibly reapplies
7316 * the setting to hardware.
7318 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7322 is_scaling_state_different(const struct dm_connector_state *dm_state,
7323 const struct dm_connector_state *old_dm_state)
7325 if (dm_state->scaling != old_dm_state->scaling)
7327 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7328 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7330 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7331 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7333 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7334 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7339 #ifdef CONFIG_DRM_AMD_DC_HDCP
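/*
 * Decide whether the HDCP workqueue needs to (re)process this
 * connector by comparing old and new content protection state and
 * content type, after normalizing the transient cases (S3 resume,
 * re-enable, hotplug with DESIRED still set).
 */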
7340 static bool is_content_protection_different(struct drm_connector_state *state,
7341 const struct drm_connector_state *old_state,
7342 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7344 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7345 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7347 /* Handle: Type0/1 change */
7348 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7349 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7350 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7354 /* CP is being re-enabled, ignore this.
7356 * Handles: ENABLED -> DESIRED
7358 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7359 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7360 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7364 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7366 * Handles: UNDESIRED -> ENABLED
7368 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7369 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7370 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7372 /* Check if something is connected/enabled; otherwise we would start HDCP with nothing connected/enabled
7373 * (hot-plug, headless S3, DPMS).
7375 * Handles: DESIRED -> DESIRED (Special case)
7377 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7378 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7379 dm_con_state->update_hdcp = false;
7384 * Handles: UNDESIRED -> UNDESIRED
7385 * DESIRED -> DESIRED
7386 * ENABLED -> ENABLED
7388 if (old_state->content_protection == state->content_protection)
7392 * Handles: UNDESIRED -> DESIRED
7393 * DESIRED -> UNDESIRED
7394 * ENABLED -> UNDESIRED
7396 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7400 * Handles: DESIRED -> ENABLED
7406 static void remove_stream(struct amdgpu_device *adev,
7407 struct amdgpu_crtc *acrtc,
7408 struct dc_stream_state *stream)
7410 /* this is the update mode case */
7412 acrtc->otg_inst = -1;
7413 acrtc->enabled = false;
7416 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7417 struct dc_cursor_position *position)
7419 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7421 int xorigin = 0, yorigin = 0;
7423 position->enable = false;
7427 if (!crtc || !plane->state->fb)
7430 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7431 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7432 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7434 plane->state->crtc_w,
7435 plane->state->crtc_h);
7439 x = plane->state->crtc_x;
7440 y = plane->state->crtc_y;
7442 if (x <= -amdgpu_crtc->max_cursor_width ||
7443 y <= -amdgpu_crtc->max_cursor_height)
7447 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7451 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7454 position->enable = true;
7455 position->translate_by_source = true;
7458 position->x_hotspot = xorigin;
7459 position->y_hotspot = yorigin;
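/*
 * Program the hardware cursor for the plane's CRTC: compute the
 * clamped position and hotspot, then push cursor attributes and
 * position to DC under dc_lock. Used by both the atomic commit path
 * and the async (legacy cursor ioctl) update path.
 */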
7464 static void handle_cursor_update(struct drm_plane *plane,
7465 struct drm_plane_state *old_plane_state)
7467 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7468 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7469 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7470 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7471 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7472 uint64_t address = afb ? afb->address : 0;
7473 struct dc_cursor_position position;
7474 struct dc_cursor_attributes attributes;
7477 if (!plane->state->fb && !old_plane_state->fb)
7480 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7482 amdgpu_crtc->crtc_id,
7483 plane->state->crtc_w,
7484 plane->state->crtc_h);
7486 ret = get_cursor_position(plane, crtc, &position);
7490 if (!position.enable) {
7491 /* turn off cursor */
7492 if (crtc_state && crtc_state->stream) {
7493 mutex_lock(&adev->dm.dc_lock);
7494 dc_stream_set_cursor_position(crtc_state->stream,
7496 mutex_unlock(&adev->dm.dc_lock);
7501 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7502 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7504 memset(&attributes, 0, sizeof(attributes));
7505 attributes.address.high_part = upper_32_bits(address);
7506 attributes.address.low_part = lower_32_bits(address);
7507 attributes.width = plane->state->crtc_w;
7508 attributes.height = plane->state->crtc_h;
7509 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7510 attributes.rotation_angle = 0;
7511 attributes.attribute_flags.value = 0;
7513 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7515 if (crtc_state->stream) {
7516 mutex_lock(&adev->dm.dc_lock);
7517 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7519 DRM_ERROR("DC failed to set cursor attributes\n");
7521 if (!dc_stream_set_cursor_position(crtc_state->stream,
7523 DRM_ERROR("DC failed to set cursor position\n");
7524 mutex_unlock(&adev->dm.dc_lock);
7528 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7531 assert_spin_locked(&acrtc->base.dev->event_lock);
7532 WARN_ON(acrtc->event);
7534 acrtc->event = acrtc->base.state->event;
7536 /* Set the flip status */
7537 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7539 /* Mark this event as consumed */
7540 acrtc->base.state->event = NULL;
7542 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7546 static void update_freesync_state_on_stream(
7547 struct amdgpu_display_manager *dm,
7548 struct dm_crtc_state *new_crtc_state,
7549 struct dc_stream_state *new_stream,
7550 struct dc_plane_state *surface,
7551 u32 flip_timestamp_in_us)
7553 struct mod_vrr_params vrr_params;
7554 struct dc_info_packet vrr_infopacket = {0};
7555 struct amdgpu_device *adev = dm->adev;
7556 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7557 unsigned long flags;
7563 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7564 * For now it's sufficient to just guard against these conditions.
7567 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7570 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7571 vrr_params = acrtc->dm_irq_params.vrr_params;
7574 mod_freesync_handle_preflip(
7575 dm->freesync_module,
7578 flip_timestamp_in_us,
7581 if (adev->family < AMDGPU_FAMILY_AI &&
7582 amdgpu_dm_vrr_active(new_crtc_state)) {
7583 mod_freesync_handle_v_update(dm->freesync_module,
7584 new_stream, &vrr_params);
7586 /* Need to call this before the frame ends. */
7587 dc_stream_adjust_vmin_vmax(dm->dc,
7588 new_crtc_state->stream,
7589 &vrr_params.adjust);
7593 mod_freesync_build_vrr_infopacket(
7594 dm->freesync_module,
7598 TRANSFER_FUNC_UNKNOWN,
7601 new_crtc_state->freesync_timing_changed |=
7602 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7604 sizeof(vrr_params.adjust)) != 0);
7606 new_crtc_state->freesync_vrr_info_changed |=
7607 (memcmp(&new_crtc_state->vrr_infopacket,
7609 sizeof(vrr_infopacket)) != 0);
7611 acrtc->dm_irq_params.vrr_params = vrr_params;
7612 new_crtc_state->vrr_infopacket = vrr_infopacket;
7614 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7615 new_stream->vrr_infopacket = vrr_infopacket;
7617 if (new_crtc_state->freesync_vrr_info_changed)
7618 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7619 new_crtc_state->base.crtc->base.id,
7620 (int)new_crtc_state->base.vrr_enabled,
7621 (int)vrr_params.state);
7623 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
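/*
 * Snapshot the FreeSync/VRR configuration and active plane count into
 * the CRTC's dm_irq_params so the vblank/vupdate interrupt handlers
 * can use them without touching atomic state.
 */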
7626 static void update_stream_irq_parameters(
7627 struct amdgpu_display_manager *dm,
7628 struct dm_crtc_state *new_crtc_state)
7630 struct dc_stream_state *new_stream = new_crtc_state->stream;
7631 struct mod_vrr_params vrr_params;
7632 struct mod_freesync_config config = new_crtc_state->freesync_config;
7633 struct amdgpu_device *adev = dm->adev;
7634 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7635 unsigned long flags;
7641 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7642 * For now it's sufficient to just guard against these conditions.
7644 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7647 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7648 vrr_params = acrtc->dm_irq_params.vrr_params;
7650 if (new_crtc_state->vrr_supported &&
7651 config.min_refresh_in_uhz &&
7652 config.max_refresh_in_uhz) {
7653 config.state = new_crtc_state->base.vrr_enabled ?
7654 VRR_STATE_ACTIVE_VARIABLE :
7657 config.state = VRR_STATE_UNSUPPORTED;
7660 mod_freesync_build_vrr_params(dm->freesync_module,
7662 &config, &vrr_params);
7664 new_crtc_state->freesync_timing_changed |=
7665 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7666 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7668 new_crtc_state->freesync_config = config;
7669 /* Copy state for access from DM IRQ handler */
7670 acrtc->dm_irq_params.freesync_config = config;
7671 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7672 acrtc->dm_irq_params.vrr_params = vrr_params;
7673 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7676 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7677 struct dm_crtc_state *new_state)
7679 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7680 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7682 if (!old_vrr_active && new_vrr_active) {
7683 /* Transition VRR inactive -> active:
7684 * While VRR is active, we must not disable vblank irq, as a
7685 * re-enable after a disable would compute bogus vblank/pflip
7686 * timestamps if it happened inside the display front porch.
7688 * We also need vupdate irq for the actual core vblank handling
7691 dm_set_vupdate_irq(new_state->base.crtc, true);
7692 drm_crtc_vblank_get(new_state->base.crtc);
7693 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7694 __func__, new_state->base.crtc->base.id);
7695 } else if (old_vrr_active && !new_vrr_active) {
7696 /* Transition VRR active -> inactive:
7697 * Allow vblank irq disable again for fixed refresh rate.
7699 dm_set_vupdate_irq(new_state->base.crtc, false);
7700 drm_crtc_vblank_put(new_state->base.crtc);
7701 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7702 __func__, new_state->base.crtc->base.id);
7706 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7708 struct drm_plane *plane;
7709 struct drm_plane_state *old_plane_state, *new_plane_state;
7713 * TODO: Make this per-stream so we don't issue redundant updates for
7714 * commits with multiple streams.
7716 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7718 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7719 handle_cursor_update(plane, old_plane_state);
7722 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7723 struct dc_state *dc_state,
7724 struct drm_device *dev,
7725 struct amdgpu_display_manager *dm,
7726 struct drm_crtc *pcrtc,
7727 bool wait_for_vblank)
7730 uint64_t timestamp_ns;
7731 struct drm_plane *plane;
7732 struct drm_plane_state *old_plane_state, *new_plane_state;
7733 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7734 struct drm_crtc_state *new_pcrtc_state =
7735 drm_atomic_get_new_crtc_state(state, pcrtc);
7736 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7737 struct dm_crtc_state *dm_old_crtc_state =
7738 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7739 int planes_count = 0, vpos, hpos;
7741 unsigned long flags;
7742 struct amdgpu_bo *abo;
7743 uint32_t target_vblank, last_flip_vblank;
7744 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7745 bool pflip_present = false;
7747 struct dc_surface_update surface_updates[MAX_SURFACES];
7748 struct dc_plane_info plane_infos[MAX_SURFACES];
7749 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7750 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7751 struct dc_stream_update stream_update;
7754 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7757 dm_error("Failed to allocate update bundle\n");
7762 * Disable the cursor first if we're disabling all the planes.
7763 * It'll remain on the screen after the planes are re-enabled if we don't.
7766 if (acrtc_state->active_planes == 0)
7767 amdgpu_dm_commit_cursors(state);
7769 /* update planes when needed */
7770 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7771 struct drm_crtc *crtc = new_plane_state->crtc;
7772 struct drm_crtc_state *new_crtc_state;
7773 struct drm_framebuffer *fb = new_plane_state->fb;
7774 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7775 bool plane_needs_flip;
7776 struct dc_plane_state *dc_plane;
7777 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7779 /* Cursor plane is handled after stream updates */
7780 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7783 if (!fb || !crtc || pcrtc != crtc)
7786 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7787 if (!new_crtc_state->active)
7790 dc_plane = dm_new_plane_state->dc_state;
7792 bundle->surface_updates[planes_count].surface = dc_plane;
7793 if (new_pcrtc_state->color_mgmt_changed) {
7794 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7795 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7796 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7799 fill_dc_scaling_info(new_plane_state,
7800 &bundle->scaling_infos[planes_count]);
7802 bundle->surface_updates[planes_count].scaling_info =
7803 &bundle->scaling_infos[planes_count];
7805 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7807 pflip_present = pflip_present || plane_needs_flip;
7809 if (!plane_needs_flip) {
7814 abo = gem_to_amdgpu_bo(fb->obj[0]);
7817 * Wait for all fences on this FB. Do a limited wait to avoid
7818 * deadlock during GPU reset, when this fence will not signal
7819 * but we hold the reservation lock for the BO.
7821 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7823 msecs_to_jiffies(5000));
7824 if (unlikely(r <= 0))
7825 DRM_ERROR("Waiting for fences timed out!");
7827 fill_dc_plane_info_and_addr(
7828 dm->adev, new_plane_state,
7830 &bundle->plane_infos[planes_count],
7831 &bundle->flip_addrs[planes_count].address,
7832 afb->tmz_surface, false);
7834 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7835 new_plane_state->plane->index,
7836 bundle->plane_infos[planes_count].dcc.enable);
7838 bundle->surface_updates[planes_count].plane_info =
7839 &bundle->plane_infos[planes_count];
7842 * Only allow immediate flips for fast updates that don't
7843 * change FB pitch, DCC state, rotation or mirroring.
7845 bundle->flip_addrs[planes_count].flip_immediate =
7846 crtc->state->async_flip &&
7847 acrtc_state->update_type == UPDATE_TYPE_FAST;
7849 timestamp_ns = ktime_get_ns();
7850 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7851 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7852 bundle->surface_updates[planes_count].surface = dc_plane;
7854 if (!bundle->surface_updates[planes_count].surface) {
7855 DRM_ERROR("No surface for CRTC: id=%d\n",
7856 acrtc_attach->crtc_id);
7860 if (plane == pcrtc->primary)
7861 update_freesync_state_on_stream(
7864 acrtc_state->stream,
7866 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7868 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7870 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7871 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7877 if (pflip_present) {
7879 /* Use old throttling in non-vrr fixed refresh rate mode
7880 * to keep flip scheduling based on target vblank counts
7881 * working in a backwards compatible way, e.g., for
7882 * clients using the GLX_OML_sync_control extension or
7883 * DRI3/Present extension with defined target_msc.
7885 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7888 /* For variable refresh rate mode only:
7889 * Get vblank of last completed flip to avoid > 1 vrr
7890 * flips per video frame by use of throttling, but allow
7891 * flip programming anywhere in the possibly large
7892 * variable vrr vblank interval for fine-grained flip
7893 * timing control and more opportunity to avoid stutter
7894 * on late submission of flips.
7896 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7897 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7898 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7901 target_vblank = last_flip_vblank + wait_for_vblank;
7904 * Wait until we're out of the vertical blank period before the one
7905 * targeted by the flip
7907 while ((acrtc_attach->enabled &&
7908 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7909 0, &vpos, &hpos, NULL,
7910 NULL, &pcrtc->hwmode)
7911 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7912 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7913 (int)(target_vblank -
7914 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7915 usleep_range(1000, 1100);
7919 * Prepare the flip event for the pageflip interrupt to handle.
7921 * This only works in the case where we've already turned on the
7922 * appropriate hardware blocks (e.g. HUBP) so in the transition case
7923 * from 0 -> n planes we have to skip a hardware generated event
7924 * and rely on sending it from software.
7926 if (acrtc_attach->base.state->event &&
7927 acrtc_state->active_planes > 0) {
7928 drm_crtc_vblank_get(pcrtc);
7930 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7932 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7933 prepare_flip_isr(acrtc_attach);
7935 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7938 if (acrtc_state->stream) {
7939 if (acrtc_state->freesync_vrr_info_changed)
7940 bundle->stream_update.vrr_infopacket =
7941 &acrtc_state->stream->vrr_infopacket;
7945 /* Update the planes if changed or disable if we don't have any. */
7946 if ((planes_count || acrtc_state->active_planes == 0) &&
7947 acrtc_state->stream) {
7948 bundle->stream_update.stream = acrtc_state->stream;
7949 if (new_pcrtc_state->mode_changed) {
7950 bundle->stream_update.src = acrtc_state->stream->src;
7951 bundle->stream_update.dst = acrtc_state->stream->dst;
7954 if (new_pcrtc_state->color_mgmt_changed) {
7956 * TODO: This isn't fully correct since we've actually
7957 * already modified the stream in place.
7959 bundle->stream_update.gamut_remap =
7960 &acrtc_state->stream->gamut_remap_matrix;
7961 bundle->stream_update.output_csc_transform =
7962 &acrtc_state->stream->csc_color_matrix;
7963 bundle->stream_update.out_transfer_func =
7964 acrtc_state->stream->out_transfer_func;
7967 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7968 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7969 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7972 * If FreeSync state on the stream has changed then we need to
7973 * re-adjust the min/max bounds now that DC doesn't handle this
7974 * as part of commit.
7976 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7977 amdgpu_dm_vrr_active(acrtc_state)) {
7978 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7979 dc_stream_adjust_vmin_vmax(
7980 dm->dc, acrtc_state->stream,
7981 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7982 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7984 mutex_lock(&dm->dc_lock);
7985 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7986 acrtc_state->stream->link->psr_settings.psr_allow_active)
7987 amdgpu_dm_psr_disable(acrtc_state->stream);
7989 dc_commit_updates_for_stream(dm->dc,
7990 bundle->surface_updates,
7992 acrtc_state->stream,
7993 &bundle->stream_update,
7997 * Enable or disable the interrupts on the backend.
7999 * Most pipes are put into power gating when unused.
8001 * When power gating is enabled on a pipe we lose the
8002 * interrupt enablement state when power gating is disabled.
8004 * So we need to update the IRQ control state in hardware
8005 * whenever the pipe turns on (since it could be previously
8006 * power gated) or off (since some pipes can't be power gated
8009 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8010 dm_update_pflip_irq_state(drm_to_adev(dev),
8013 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8014 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8015 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8016 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8017 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8018 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8019 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8020 amdgpu_dm_psr_enable(acrtc_state->stream);
8023 mutex_unlock(&dm->dc_lock);
8027 * Update cursor state *after* programming all the planes.
8028 * This avoids redundant programming in the case where we're going
8029 * to be disabling a single plane - those pipes are being disabled.
8031 if (acrtc_state->active_planes)
8032 amdgpu_dm_commit_cursors(state);
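/*
 * Notify the audio component about streams removed or added by this
 * commit so audio endpoints (via ELD notifications) track the new
 * display topology.
 */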
8038 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8039 struct drm_atomic_state *state)
8041 struct amdgpu_device *adev = drm_to_adev(dev);
8042 struct amdgpu_dm_connector *aconnector;
8043 struct drm_connector *connector;
8044 struct drm_connector_state *old_con_state, *new_con_state;
8045 struct drm_crtc_state *new_crtc_state;
8046 struct dm_crtc_state *new_dm_crtc_state;
8047 const struct dc_stream_status *status;
8050 /* Notify device removals. */
8051 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8052 if (old_con_state->crtc != new_con_state->crtc) {
8053 /* CRTC changes require notification. */
8057 if (!new_con_state->crtc)
8060 new_crtc_state = drm_atomic_get_new_crtc_state(
8061 state, new_con_state->crtc);
8063 if (!new_crtc_state)
8066 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8070 aconnector = to_amdgpu_dm_connector(connector);
8072 mutex_lock(&adev->dm.audio_lock);
8073 inst = aconnector->audio_inst;
8074 aconnector->audio_inst = -1;
8075 mutex_unlock(&adev->dm.audio_lock);
8077 amdgpu_dm_audio_eld_notify(adev, inst);
8080 /* Notify audio device additions. */
8081 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8082 if (!new_con_state->crtc)
8085 new_crtc_state = drm_atomic_get_new_crtc_state(
8086 state, new_con_state->crtc);
8088 if (!new_crtc_state)
8091 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8094 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8095 if (!new_dm_crtc_state->stream)
8098 status = dc_stream_get_status(new_dm_crtc_state->stream);
8102 aconnector = to_amdgpu_dm_connector(connector);
8104 mutex_lock(&adev->dm.audio_lock);
8105 inst = status->audio_inst;
8106 aconnector->audio_inst = inst;
8107 mutex_unlock(&adev->dm.audio_lock);
8109 amdgpu_dm_audio_eld_notify(adev, inst);
8114 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8115 * @crtc_state: the DRM CRTC state
8116 * @stream_state: the DC stream state.
8118 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8119 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8121 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8122 struct dc_stream_state *stream_state)
8124 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8128 * amdgpu_dm_atomic_commit_tail() - AMDGPU DM's commit tail implementation.
8129 * @state: The atomic state to commit
8131 * This will tell DC to commit the constructed DC state from atomic_check,
8132 * programming the hardware. Any failures here implies a hardware failure, since
8133 * atomic check should have filtered anything non-kosher.
8135 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8137 struct drm_device *dev = state->dev;
8138 struct amdgpu_device *adev = drm_to_adev(dev);
8139 struct amdgpu_display_manager *dm = &adev->dm;
8140 struct dm_atomic_state *dm_state;
8141 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8143 struct drm_crtc *crtc;
8144 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8145 unsigned long flags;
8146 bool wait_for_vblank = true;
8147 struct drm_connector *connector;
8148 struct drm_connector_state *old_con_state, *new_con_state;
8149 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8150 int crtc_disable_count = 0;
8151 bool mode_set_reset_required = false;
8153 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8155 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8157 dm_state = dm_atomic_get_new_state(state);
8158 if (dm_state && dm_state->context) {
8159 dc_state = dm_state->context;
8161 /* No state changes, retain current state. */
8162 dc_state_temp = dc_create_state(dm->dc);
8163 ASSERT(dc_state_temp);
8164 dc_state = dc_state_temp;
8165 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8168 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8169 new_crtc_state, i) {
8170 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8172 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8174 if (old_crtc_state->active &&
8175 (!new_crtc_state->active ||
8176 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8177 manage_dm_interrupts(adev, acrtc, false);
8178 dc_stream_release(dm_old_crtc_state->stream);
8182 drm_atomic_helper_calc_timestamping_constants(state);
8184 /* update changed items */
8185 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8186 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8188 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8189 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8192 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8193 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8194 "connectors_changed:%d\n",
8196 new_crtc_state->enable,
8197 new_crtc_state->active,
8198 new_crtc_state->planes_changed,
8199 new_crtc_state->mode_changed,
8200 new_crtc_state->active_changed,
8201 new_crtc_state->connectors_changed);
8203 /* Disable cursor if disabling crtc */
8204 if (old_crtc_state->active && !new_crtc_state->active) {
8205 struct dc_cursor_position position;
8207 memset(&position, 0, sizeof(position));
8208 mutex_lock(&dm->dc_lock);
8209 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8210 mutex_unlock(&dm->dc_lock);
8213 /* Copy all transient state flags into dc state */
8214 if (dm_new_crtc_state->stream) {
8215 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8216 dm_new_crtc_state->stream);
8219 /* handles headless hotplug case, updating new_state and
8220 * aconnector as needed
8223 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8225 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8227 if (!dm_new_crtc_state->stream) {
8229 * this could happen because of issues with
8230 * userspace notification delivery.
8231 * In this case userspace tries to set a mode on a
8232 * display which is in fact disconnected.
8233 * dc_sink is NULL in this case on the aconnector.
8234 * We expect a mode reset to come soon.
8236 * This can also happen when an unplug is done
8237 * while the resume sequence is still completing.
8239 * In this case, we want to pretend we still
8240 * have a sink to keep the pipe running so that
8241 * the hw state stays consistent with the sw state.
8243 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8244 __func__, acrtc->base.base.id);
8248 if (dm_old_crtc_state->stream)
8249 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8251 pm_runtime_get_noresume(dev->dev);
8253 acrtc->enabled = true;
8254 acrtc->hw_mode = new_crtc_state->mode;
8255 crtc->hwmode = new_crtc_state->mode;
8256 mode_set_reset_required = true;
8257 } else if (modereset_required(new_crtc_state)) {
8258 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8259 /* i.e. reset mode */
8260 if (dm_old_crtc_state->stream)
8261 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8262 mode_set_reset_required = true;
8264 } /* for_each_crtc_in_state() */
8267 /* If there was a mode set or reset, disable eDP PSR */
8268 if (mode_set_reset_required)
8269 amdgpu_dm_psr_disable_all(dm);
8271 dm_enable_per_frame_crtc_master_sync(dc_state);
8272 mutex_lock(&dm->dc_lock);
8273 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8274 mutex_unlock(&dm->dc_lock);
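/*
 * Now that the DC state is committed, cache the OTG (timing generator)
 * instance backing each stream on its amdgpu_crtc so the vblank/pageflip
 * interrupt handlers can map incoming IRQs back to the right CRTC.
 */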
8277 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8278 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8280 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8282 if (dm_new_crtc_state->stream != NULL) {
8283 const struct dc_stream_status *status =
8284 dc_stream_get_status(dm_new_crtc_state->stream);
8287 status = dc_stream_get_status_from_state(dc_state,
8288 dm_new_crtc_state->stream);
8290 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8292 acrtc->otg_inst = status->primary_otg_inst;
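/*
 * HDCP handling: when a CRTC driving a protected connector is being torn
 * down, reset the display in the HDCP workqueue and demote the content
 * protection property from ENABLED back to DESIRED so protection can be
 * re-established once the stream comes back.
 */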
8295 #ifdef CONFIG_DRM_AMD_DC_HDCP
8296 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8297 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8298 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8299 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8301 new_crtc_state = NULL;
8304 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8306 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8308 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8309 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8310 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8311 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8312 dm_new_con_state->update_hdcp = true;
8316 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8317 hdcp_update_display(
8318 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8319 new_con_state->hdcp_content_type,
8320 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8325 /* Handle connector state changes */
8326 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8327 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8328 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8329 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8330 struct dc_surface_update dummy_updates[MAX_SURFACES];
8331 struct dc_stream_update stream_update;
8332 struct dc_info_packet hdr_packet;
8333 struct dc_stream_status *status = NULL;
8334 bool abm_changed, hdr_changed, scaling_changed;
8336 memset(&dummy_updates, 0, sizeof(dummy_updates));
8337 memset(&stream_update, 0, sizeof(stream_update));
8340 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8341 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8344 /* Skip any modesets/resets */
8345 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8348 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8349 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8351 scaling_changed = is_scaling_state_different(dm_new_con_state,
8354 abm_changed = dm_new_crtc_state->abm_level !=
8355 dm_old_crtc_state->abm_level;
8358 is_hdr_metadata_different(old_con_state, new_con_state);
8360 if (!scaling_changed && !abm_changed && !hdr_changed)
8363 stream_update.stream = dm_new_crtc_state->stream;
8364 if (scaling_changed) {
8365 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8366 dm_new_con_state, dm_new_crtc_state->stream);
8368 stream_update.src = dm_new_crtc_state->stream->src;
8369 stream_update.dst = dm_new_crtc_state->stream->dst;
8373 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8375 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8379 fill_hdr_info_packet(new_con_state, &hdr_packet);
8380 stream_update.hdr_static_metadata = &hdr_packet;
8383 status = dc_stream_get_status(dm_new_crtc_state->stream);
8385 WARN_ON(!status->plane_count);
8388 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8389 * Here we create an empty update on each plane.
8390 * To fix this, DC should permit updating only stream properties.
8392 for (j = 0; j < status->plane_count; j++)
8393 dummy_updates[j].surface = status->plane_states[0];
8396 mutex_lock(&dm->dc_lock);
8397 dc_commit_updates_for_stream(dm->dc,
8399 status->plane_count,
8400 dm_new_crtc_state->stream,
8403 mutex_unlock(&dm->dc_lock);
8406 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8407 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8408 new_crtc_state, i) {
8409 if (old_crtc_state->active && !new_crtc_state->active)
8410 crtc_disable_count++;
8412 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8413 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8415 /* Update the freesync config on the crtc state and the parameters used for irq handling */
8416 update_stream_irq_parameters(dm, dm_new_crtc_state);
8418 /* Handle vrr on->off / off->on transitions */
8419 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8424 * Enable interrupts for CRTCs that are newly enabled or went through
8425 * a modeset. It was intentionally deferred until after the front end
8426 * state was modified to wait until the OTG was on and so the IRQ
8427 * handlers didn't access stale or invalid state.
8429 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8430 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8432 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8434 if (new_crtc_state->active &&
8435 (!old_crtc_state->active ||
8436 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8437 dc_stream_retain(dm_new_crtc_state->stream);
8438 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8439 manage_dm_interrupts(adev, acrtc, true);
8441 #ifdef CONFIG_DEBUG_FS
8443 * Frontend may have changed so reapply the CRC capture
8444 * settings for the stream.
8446 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8448 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8449 amdgpu_dm_crtc_configure_crc_source(
8450 crtc, dm_new_crtc_state,
8451 dm_new_crtc_state->crc_src);
8457 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8458 if (new_crtc_state->async_flip)
8459 wait_for_vblank = false;
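/*
 * wait_for_vblank was cleared above if any CRTC requested an async flip;
 * in that case the flip_done wait further below is skipped so immediate
 * flips are not throttled to vblank.
 */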
8461 /* Update planes when needed, per CRTC */
8462 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8463 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8465 if (dm_new_crtc_state->stream)
8466 amdgpu_dm_commit_planes(state, dc_state, dev,
8467 dm, crtc, wait_for_vblank);
8470 /* Update audio instances for each connector. */
8471 amdgpu_dm_commit_audio(dev, state);
8474 * send vblank event on all events not handled in flip and
8475 * mark consumed event for drm_atomic_helper_commit_hw_done
8477 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8478 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8480 if (new_crtc_state->event)
8481 drm_send_event_locked(dev, &new_crtc_state->event->base);
8483 new_crtc_state->event = NULL;
8485 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8487 /* Signal HW programming completion */
8488 drm_atomic_helper_commit_hw_done(state);
8490 if (wait_for_vblank)
8491 drm_atomic_helper_wait_for_flip_done(dev, state);
8493 drm_atomic_helper_cleanup_planes(dev, state);
8495 /* return the stolen vga memory back to VRAM */
8496 if (!adev->mman.keep_stolen_vga_memory)
8497 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8498 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8501 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8502 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
8505 for (i = 0; i < crtc_disable_count; i++)
8506 pm_runtime_put_autosuspend(dev->dev);
8507 pm_runtime_mark_last_busy(dev->dev);
8510 dc_release_state(dc_state_temp);
8514 static int dm_force_atomic_commit(struct drm_connector *connector)
8517 struct drm_device *ddev = connector->dev;
8518 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8519 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8520 struct drm_plane *plane = disconnected_acrtc->base.primary;
8521 struct drm_connector_state *conn_state;
8522 struct drm_crtc_state *crtc_state;
8523 struct drm_plane_state *plane_state;
8528 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8530 /* Construct an atomic state to restore previous display setting */
8533 * Attach connectors to drm_atomic_state
8535 conn_state = drm_atomic_get_connector_state(state, connector);
8537 ret = PTR_ERR_OR_ZERO(conn_state);
8541 /* Attach crtc to drm_atomic_state*/
8542 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8544 ret = PTR_ERR_OR_ZERO(crtc_state);
8548 /* force a restore */
8549 crtc_state->mode_changed = true;
8551 /* Attach plane to drm_atomic_state */
8552 plane_state = drm_atomic_get_plane_state(state, plane);
8554 ret = PTR_ERR_OR_ZERO(plane_state);
8558 /* Call commit internally with the state we just constructed */
8559 ret = drm_atomic_commit(state);
8562 drm_atomic_state_put(state);
8564 DRM_ERROR("Restoring old state failed with %i\n", ret);
8570 * This function handles all cases when a set mode request does not come upon hotplug.
8571 * This includes when a display is unplugged then plugged back into the
8572 * same port and when running without usermode desktop manager support
8574 void dm_restore_drm_connector_state(struct drm_device *dev,
8575 struct drm_connector *connector)
8577 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8578 struct amdgpu_crtc *disconnected_acrtc;
8579 struct dm_crtc_state *acrtc_state;
8581 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8584 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8585 if (!disconnected_acrtc)
8588 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8589 if (!acrtc_state->stream)
8593 * If the previous sink is not released and different from the current,
8594 * we deduce we are in a state where we cannot rely on a usermode call
8595 * to turn on the display, so we do it here
8597 if (acrtc_state->stream->sink != aconnector->dc_sink)
8598 dm_force_atomic_commit(&aconnector->base);
8602 * Grabs all modesetting locks to serialize against any blocking commits and
8603 * waits for completion of all non-blocking commits.
8605 static int do_aquire_global_lock(struct drm_device *dev,
8606 struct drm_atomic_state *state)
8608 struct drm_crtc *crtc;
8609 struct drm_crtc_commit *commit;
8613 * Adding all modeset locks to acquire_ctx will
8614 * ensure that when the framework releases it, the
8615 * extra locks we are taking here will also get released.
8617 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8621 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8622 spin_lock(&crtc->commit_lock);
8623 commit = list_first_entry_or_null(&crtc->commit_list,
8624 struct drm_crtc_commit, commit_entry);
8626 drm_crtc_commit_get(commit);
8627 spin_unlock(&crtc->commit_lock);
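/*
 * A reference on the latest commit is taken under the spinlock above so it
 * cannot be freed while we wait for hw_done/flip_done below after dropping
 * the lock.
 */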
8633 * Make sure all pending HW programming has completed and page flips are done
8636 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8639 ret = wait_for_completion_interruptible_timeout(
8640 &commit->flip_done, 10*HZ);
8643 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8644 "timed out\n", crtc->base.id, crtc->name);
8646 drm_crtc_commit_put(commit);
8649 return ret < 0 ? ret : 0;
8652 static void get_freesync_config_for_crtc(
8653 struct dm_crtc_state *new_crtc_state,
8654 struct dm_connector_state *new_con_state)
8656 struct mod_freesync_config config = {0};
8657 struct amdgpu_dm_connector *aconnector =
8658 to_amdgpu_dm_connector(new_con_state->base.connector);
8659 struct drm_display_mode *mode = &new_crtc_state->base.mode;
8660 int vrefresh = drm_mode_vrefresh(mode);
8662 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8663 vrefresh >= aconnector->min_vfreq &&
8664 vrefresh <= aconnector->max_vfreq;
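/*
 * VRR is only usable when the connector reports freesync capability and the
 * nominal refresh rate of the requested mode lies within the panel's
 * advertised min/max vertical frequency range.
 */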
8666 if (new_crtc_state->vrr_supported) {
8667 new_crtc_state->stream->ignore_msa_timing_param = true;
8668 config.state = new_crtc_state->base.vrr_enabled ?
8669 VRR_STATE_ACTIVE_VARIABLE :
8671 config.min_refresh_in_uhz =
8672 aconnector->min_vfreq * 1000000;
8673 config.max_refresh_in_uhz =
8674 aconnector->max_vfreq * 1000000;
8675 config.vsif_supported = true;
8679 new_crtc_state->freesync_config = config;
8682 static void reset_freesync_config_for_crtc(
8683 struct dm_crtc_state *new_crtc_state)
8685 new_crtc_state->vrr_supported = false;
8687 memset(&new_crtc_state->vrr_infopacket, 0,
8688 sizeof(new_crtc_state->vrr_infopacket));
8691 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8692 struct drm_atomic_state *state,
8693 struct drm_crtc *crtc,
8694 struct drm_crtc_state *old_crtc_state,
8695 struct drm_crtc_state *new_crtc_state,
8697 bool *lock_and_validation_needed)
8699 struct dm_atomic_state *dm_state = NULL;
8700 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8701 struct dc_stream_state *new_stream;
8705 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8706 * update changed items
8708 struct amdgpu_crtc *acrtc = NULL;
8709 struct amdgpu_dm_connector *aconnector = NULL;
8710 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8711 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8715 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8716 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8717 acrtc = to_amdgpu_crtc(crtc);
8718 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8720 /* TODO This hack should go away */
8721 if (aconnector && enable) {
8722 /* Make sure fake sink is created in plug-in scenario */
8723 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8725 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8728 if (IS_ERR(drm_new_conn_state)) {
8729 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8733 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8734 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8736 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8739 new_stream = create_validate_stream_for_sink(aconnector,
8740 &new_crtc_state->mode,
8742 dm_old_crtc_state->stream);
8745 * We can have no stream on ACTION_SET if a display
8746 * was disconnected during S3; in this case it is not an
8747 * error, the OS will be updated after detection, and
8748 * will do the right thing on the next atomic commit
8752 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8753 __func__, acrtc->base.base.id);
8759 * TODO: Check VSDB bits to decide whether this should
8760 * be enabled or not.
8762 new_stream->triggered_crtc_reset.enabled =
8763 dm->force_timing_sync;
8765 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8767 ret = fill_hdr_info_packet(drm_new_conn_state,
8768 &new_stream->hdr_static_metadata);
8773 * If we already removed the old stream from the context
8774 * (and set the new stream to NULL) then we can't reuse
8775 * the old stream even if the stream and scaling are unchanged.
8776 * We'll hit the BUG_ON and black screen.
8778 * TODO: Refactor this function to allow this check to work
8779 * in all conditions.
8781 if (dm_new_crtc_state->stream &&
8782 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8783 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8784 new_crtc_state->mode_changed = false;
8785 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8786 new_crtc_state->mode_changed);
8790 /* mode_changed flag may get updated above, need to check again */
8791 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8795 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8796 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8797 "connectors_changed:%d\n",
8799 new_crtc_state->enable,
8800 new_crtc_state->active,
8801 new_crtc_state->planes_changed,
8802 new_crtc_state->mode_changed,
8803 new_crtc_state->active_changed,
8804 new_crtc_state->connectors_changed);
8806 /* Remove stream for any changed/disabled CRTC */
8809 if (!dm_old_crtc_state->stream)
8812 ret = dm_atomic_get_state(state, &dm_state);
8816 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8819 /* i.e. reset mode */
8820 if (dc_remove_stream_from_ctx(
8823 dm_old_crtc_state->stream) != DC_OK) {
8828 dc_stream_release(dm_old_crtc_state->stream);
8829 dm_new_crtc_state->stream = NULL;
8831 reset_freesync_config_for_crtc(dm_new_crtc_state);
8833 *lock_and_validation_needed = true;
8835 } else { /* Add stream for any updated/enabled CRTC */
8837 * Quick fix to prevent a NULL pointer dereference on new_stream when
8838 * newly added MST connectors are not found in the existing crtc_state in chained mode.
8839 * TODO: need to dig out the root cause of this.
8841 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8844 if (modereset_required(new_crtc_state))
8847 if (modeset_required(new_crtc_state, new_stream,
8848 dm_old_crtc_state->stream)) {
8850 WARN_ON(dm_new_crtc_state->stream);
8852 ret = dm_atomic_get_state(state, &dm_state);
8856 dm_new_crtc_state->stream = new_stream;
8858 dc_stream_retain(new_stream);
8860 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8863 if (dc_add_stream_to_ctx(
8866 dm_new_crtc_state->stream) != DC_OK) {
8871 *lock_and_validation_needed = true;
8876 /* Release extra reference */
8878 dc_stream_release(new_stream);
8881 * We want to do dc stream updates that do not require a
8882 * full modeset below.
8884 if (!(enable && aconnector && new_crtc_state->active))
8887 * Given above conditions, the dc state cannot be NULL because:
8888 * 1. We're in the process of enabling CRTCs (just been added
8889 * to the dc context, or already is on the context)
8890 * 2. Has a valid connector attached, and
8891 * 3. Is currently active and enabled.
8892 * => The dc stream state currently exists.
8894 BUG_ON(dm_new_crtc_state->stream == NULL);
8896 /* Scaling or underscan settings */
8897 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8898 update_stream_scaling_settings(
8899 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8902 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8905 * Color management settings. We also update color properties
8906 * when a modeset is needed, to ensure it gets reprogrammed.
8908 if (dm_new_crtc_state->base.color_mgmt_changed ||
8909 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8910 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8915 /* Update Freesync settings. */
8916 get_freesync_config_for_crtc(dm_new_crtc_state,
8923 dc_stream_release(new_stream);
8927 static bool should_reset_plane(struct drm_atomic_state *state,
8928 struct drm_plane *plane,
8929 struct drm_plane_state *old_plane_state,
8930 struct drm_plane_state *new_plane_state)
8932 struct drm_plane *other;
8933 struct drm_plane_state *old_other_state, *new_other_state;
8934 struct drm_crtc_state *new_crtc_state;
8938 * TODO: Remove this hack once the checks below are sufficient
8939 * to determine when we need to reset all the planes on the stream.
8942 if (state->allow_modeset)
8945 /* Exit early if we know that we're adding or removing the plane. */
8946 if (old_plane_state->crtc != new_plane_state->crtc)
8949 /* old crtc == new_crtc == NULL, plane not in context. */
8950 if (!new_plane_state->crtc)
8954 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8956 if (!new_crtc_state)
8959 /* CRTC Degamma changes currently require us to recreate planes. */
8960 if (new_crtc_state->color_mgmt_changed)
8963 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8967 * If there are any new primary or overlay planes being added or
8968 * removed then the z-order can potentially change. To ensure
8969 * correct z-order and pipe acquisition the current DC architecture
8970 * requires us to remove and recreate all existing planes.
8972 * TODO: Come up with a more elegant solution for this.
8974 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8975 struct amdgpu_framebuffer *old_afb, *new_afb;
8976 if (other->type == DRM_PLANE_TYPE_CURSOR)
8979 if (old_other_state->crtc != new_plane_state->crtc &&
8980 new_other_state->crtc != new_plane_state->crtc)
8983 if (old_other_state->crtc != new_other_state->crtc)
8986 /* Src/dst size and scaling updates. */
8987 if (old_other_state->src_w != new_other_state->src_w ||
8988 old_other_state->src_h != new_other_state->src_h ||
8989 old_other_state->crtc_w != new_other_state->crtc_w ||
8990 old_other_state->crtc_h != new_other_state->crtc_h)
8993 /* Rotation / mirroring updates. */
8994 if (old_other_state->rotation != new_other_state->rotation)
8997 /* Blending updates. */
8998 if (old_other_state->pixel_blend_mode !=
8999 new_other_state->pixel_blend_mode)
9002 /* Alpha updates. */
9003 if (old_other_state->alpha != new_other_state->alpha)
9006 /* Colorspace changes. */
9007 if (old_other_state->color_range != new_other_state->color_range ||
9008 old_other_state->color_encoding != new_other_state->color_encoding)
9011 /* Framebuffer checks fall at the end. */
9012 if (!old_other_state->fb || !new_other_state->fb)
9015 /* Pixel format changes can require bandwidth updates. */
9016 if (old_other_state->fb->format != new_other_state->fb->format)
9019 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9020 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9022 /* Tiling and DCC changes also require bandwidth updates. */
9023 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9024 old_afb->base.modifier != new_afb->base.modifier)
9031 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9032 struct drm_plane_state *new_plane_state,
9033 struct drm_framebuffer *fb)
9035 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9036 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9040 if (fb->width > new_acrtc->max_cursor_width ||
9041 fb->height > new_acrtc->max_cursor_height) {
9042 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9043 new_plane_state->fb->width,
9044 new_plane_state->fb->height);
9047 if (new_plane_state->src_w != fb->width << 16 ||
9048 new_plane_state->src_h != fb->height << 16) {
9049 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9053 /* Pitch in pixels */
9054 pitch = fb->pitches[0] / fb->format->cpp[0];
9056 if (fb->width != pitch) {
9057 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9066 /* FB pitch is supported by cursor plane */
9069 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9073 /* Core DRM takes care of checking FB modifiers, so we only need to
9074 * check tiling flags when the FB doesn't have a modifier. */
9075 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9076 if (adev->family < AMDGPU_FAMILY_AI) {
9077 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9078 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9079 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9081 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9084 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9092 static int dm_update_plane_state(struct dc *dc,
9093 struct drm_atomic_state *state,
9094 struct drm_plane *plane,
9095 struct drm_plane_state *old_plane_state,
9096 struct drm_plane_state *new_plane_state,
9098 bool *lock_and_validation_needed)
9101 struct dm_atomic_state *dm_state = NULL;
9102 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9103 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9104 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9105 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9106 struct amdgpu_crtc *new_acrtc;
9111 new_plane_crtc = new_plane_state->crtc;
9112 old_plane_crtc = old_plane_state->crtc;
9113 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9114 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9116 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9117 if (!enable || !new_plane_crtc ||
9118 drm_atomic_plane_disabling(plane->state, new_plane_state))
9121 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
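/*
 * Cursor planes are not backed by a dc_plane_state; they are programmed
 * through the stream's cursor attributes instead, so only basic
 * position/size/FB sanity checks are performed here.
 */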
9123 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9124 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9128 if (new_plane_state->fb) {
9129 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9130 new_plane_state->fb);
9138 needs_reset = should_reset_plane(state, plane, old_plane_state,
9141 /* Remove any changed/removed planes */
9146 if (!old_plane_crtc)
9149 old_crtc_state = drm_atomic_get_old_crtc_state(
9150 state, old_plane_crtc);
9151 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9153 if (!dm_old_crtc_state->stream)
9156 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9157 plane->base.id, old_plane_crtc->base.id);
9159 ret = dm_atomic_get_state(state, &dm_state);
9163 if (!dc_remove_plane_from_context(
9165 dm_old_crtc_state->stream,
9166 dm_old_plane_state->dc_state,
9167 dm_state->context)) {
9173 dc_plane_state_release(dm_old_plane_state->dc_state);
9174 dm_new_plane_state->dc_state = NULL;
9176 *lock_and_validation_needed = true;
9178 } else { /* Add new planes */
9179 struct dc_plane_state *dc_new_plane_state;
9181 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9184 if (!new_plane_crtc)
9187 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9188 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9190 if (!dm_new_crtc_state->stream)
9196 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9200 WARN_ON(dm_new_plane_state->dc_state);
9202 dc_new_plane_state = dc_create_plane_state(dc);
9203 if (!dc_new_plane_state)
9206 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9207 plane->base.id, new_plane_crtc->base.id);
9209 ret = fill_dc_plane_attributes(
9210 drm_to_adev(new_plane_crtc->dev),
9215 dc_plane_state_release(dc_new_plane_state);
9219 ret = dm_atomic_get_state(state, &dm_state);
9221 dc_plane_state_release(dc_new_plane_state);
9226 * Any atomic check errors that occur after this will
9227 * not need a release. The plane state will be attached
9228 * to the stream, and therefore part of the atomic
9229 * state. It'll be released when the atomic state is cleaned up.
9232 if (!dc_add_plane_to_context(
9234 dm_new_crtc_state->stream,
9236 dm_state->context)) {
9238 dc_plane_state_release(dc_new_plane_state);
9242 dm_new_plane_state->dc_state = dc_new_plane_state;
9244 /* Tell DC to do a full surface update every time there
9245 * is a plane change. Inefficient, but works for now.
9247 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9249 *lock_and_validation_needed = true;
9256 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9257 struct drm_crtc *crtc,
9258 struct drm_crtc_state *new_crtc_state)
9260 struct drm_plane_state *new_cursor_state, *new_primary_state;
9261 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9263 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9264 * cursor per pipe but it's going to inherit the scaling and
9265 * positioning from the underlying pipe. Check the cursor plane's
9266 * blending properties match the primary plane's. */
9268 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9269 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9270 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9274 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9275 (new_cursor_state->src_w >> 16);
9276 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9277 (new_cursor_state->src_h >> 16);
9279 primary_scale_w = new_primary_state->crtc_w * 1000 /
9280 (new_primary_state->src_w >> 16);
9281 primary_scale_h = new_primary_state->crtc_h * 1000 /
9282 (new_primary_state->src_h >> 16);
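/*
 * src_w/src_h are 16.16 fixed point, so shifting right by 16 yields the
 * source size in pixels. Scales are compared in units of 1/1000: e.g. a
 * 64x64 cursor FB displayed at 128x128 gives a scale of 2000, which must
 * match the primary plane's scale.
 */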
9284 if (cursor_scale_w != primary_scale_w ||
9285 cursor_scale_h != primary_scale_h) {
9286 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9293 #if defined(CONFIG_DRM_AMD_DC_DCN)
9294 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9296 struct drm_connector *connector;
9297 struct drm_connector_state *conn_state;
9298 struct amdgpu_dm_connector *aconnector = NULL;
9300 for_each_new_connector_in_state(state, connector, conn_state, i) {
9301 if (conn_state->crtc != crtc)
9304 aconnector = to_amdgpu_dm_connector(connector);
9305 if (!aconnector->port || !aconnector->mst_port)
9314 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9319 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9320 * @dev: The DRM device
9321 * @state: The atomic state to commit
9323 * Validate that the given atomic state is programmable by DC into hardware.
9324 * This involves constructing a &struct dc_state reflecting the new hardware
9325 * state we wish to commit, then querying DC to see if it is programmable. It's
9326 * important not to modify the existing DC state. Otherwise, atomic_check
9327 * may unexpectedly commit hardware changes.
9329 * When validating the DC state, it's important that the right locks are
9330 * acquired. For full updates case which removes/adds/updates streams on one
9331 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9332 * that any such full update commit will wait for completion of any outstanding
9333 * flip using DRMs synchronization events.
9335 * Note that DM adds the affected connectors for all CRTCs in state, when that
9336 * might not seem necessary. This is because DC stream creation requires the
9337 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9338 * be possible but non-trivial - a possible TODO item.
9340 * Return: 0 on success, or a negative error code if validation failed.
9342 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9343 struct drm_atomic_state *state)
9345 struct amdgpu_device *adev = drm_to_adev(dev);
9346 struct dm_atomic_state *dm_state = NULL;
9347 struct dc *dc = adev->dm.dc;
9348 struct drm_connector *connector;
9349 struct drm_connector_state *old_con_state, *new_con_state;
9350 struct drm_crtc *crtc;
9351 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9352 struct drm_plane *plane;
9353 struct drm_plane_state *old_plane_state, *new_plane_state;
9354 enum dc_status status;
9356 bool lock_and_validation_needed = false;
9357 struct dm_crtc_state *dm_old_crtc_state;
9359 trace_amdgpu_dm_atomic_check_begin(state);
9361 ret = drm_atomic_helper_check_modeset(dev, state);
9365 /* Check connector changes */
9366 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9367 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9368 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9370 /* Skip connectors that are disabled or part of modeset already. */
9371 if (!old_con_state->crtc && !new_con_state->crtc)
9374 if (!new_con_state->crtc)
9377 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9378 if (IS_ERR(new_crtc_state)) {
9379 ret = PTR_ERR(new_crtc_state);
9383 if (dm_old_con_state->abm_level !=
9384 dm_new_con_state->abm_level)
9385 new_crtc_state->connectors_changed = true;
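/*
 * The abm_level lives on the connector state; flagging connectors_changed
 * here forces the CRTC through the modeset path so the new level gets
 * applied to the stream.
 */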
9388 #if defined(CONFIG_DRM_AMD_DC_DCN)
9389 if (adev->asic_type >= CHIP_NAVI10) {
9390 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9391 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9392 ret = add_affected_mst_dsc_crtcs(state, crtc);
9399 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9400 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9402 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9403 !new_crtc_state->color_mgmt_changed &&
9404 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9405 dm_old_crtc_state->dsc_force_changed == false)
9408 if (!new_crtc_state->enable)
9411 ret = drm_atomic_add_affected_connectors(state, crtc);
9415 ret = drm_atomic_add_affected_planes(state, crtc);
9419 if (dm_old_crtc_state->dsc_force_changed)
9420 new_crtc_state->mode_changed = true;
9424 * Add all primary and overlay planes on the CRTC to the state
9425 * whenever a plane is enabled to maintain correct z-ordering
9426 * and to enable fast surface updates.
9428 drm_for_each_crtc(crtc, dev) {
9429 bool modified = false;
9431 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9432 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9435 if (new_plane_state->crtc == crtc ||
9436 old_plane_state->crtc == crtc) {
9445 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9446 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9450 drm_atomic_get_plane_state(state, plane);
9452 if (IS_ERR(new_plane_state)) {
9453 ret = PTR_ERR(new_plane_state);
9459 /* Remove existing planes if they are modified */
9460 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9461 ret = dm_update_plane_state(dc, state, plane,
9465 &lock_and_validation_needed);
9470 /* Disable all crtcs which require disable */
9471 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9472 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9476 &lock_and_validation_needed);
9481 /* Enable all crtcs which require enable */
9482 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9483 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9487 &lock_and_validation_needed);
9492 /* Add new/modified planes */
9493 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9494 ret = dm_update_plane_state(dc, state, plane,
9498 &lock_and_validation_needed);
9503 /* Run this here since we want to validate the streams we created */
9504 ret = drm_atomic_helper_check_planes(dev, state);
9508 /* Check cursor plane scaling */
9509 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9510 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9515 if (state->legacy_cursor_update) {
9517 * This is a fast cursor update coming from the plane update
9518 * helper; check if it can be done asynchronously for better performance.
9521 state->async_update =
9522 !drm_atomic_helper_async_check(dev, state);
9525 * Skip the remaining global validation if this is an async
9526 * update. Cursor updates can be done without affecting
9527 * state or bandwidth calcs and this avoids the performance
9528 * penalty of locking the private state object and
9529 * allocating a new dc_state.
9531 if (state->async_update)
9535 /* Check scaling and underscan changes */
9536 /* TODO Removed scaling changes validation due to inability to commit
9537 * a new stream into context w/o causing a full reset. Need to
9538 * decide how to handle.
9540 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9541 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9542 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9543 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9545 /* Skip any modesets/resets */
9546 if (!acrtc || drm_atomic_crtc_needs_modeset(
9547 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9550 /* Skip anything that is not a scaling or underscan change */
9551 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9554 lock_and_validation_needed = true;
9558 * Streams and planes are reset when there are changes that affect
9559 * bandwidth. Anything that affects bandwidth needs to go through
9560 * DC global validation to ensure that the configuration can be applied to hardware.
9563 * We have to currently stall out here in atomic_check for outstanding
9564 * commits to finish in this case because our IRQ handlers reference
9565 * DRM state directly - we can end up disabling interrupts too early if we don't.
9568 * TODO: Remove this stall and drop DM state private objects.
9570 if (lock_and_validation_needed) {
9571 ret = dm_atomic_get_state(state, &dm_state);
9575 ret = do_aquire_global_lock(dev, state);
9579 #if defined(CONFIG_DRM_AMD_DC_DCN)
9580 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9583 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9589 * Perform validation of MST topology in the state:
9590 * We need to perform MST atomic check before calling
9591 * dc_validate_global_state(), or there is a chance
9592 * to get stuck in an infinite loop and hang eventually.
9594 ret = drm_dp_mst_atomic_check(state);
9597 status = dc_validate_global_state(dc, dm_state->context, false);
9598 if (status != DC_OK) {
9599 DC_LOG_WARNING("DC global validation failure: %s (%d)",
9600 dc_status_to_str(status), status);
9606 * The commit is a fast update. Fast updates shouldn't change
9607 * the DC context, affect global validation, and can have their
9608 * commit work done in parallel with other commits not touching
9609 * the same resource. If we have a new DC context as part of
9610 * the DM atomic state from validation we need to free it and
9611 * retain the existing one instead.
9613 * Furthermore, since the DM atomic state only contains the DC
9614 * context and can safely be annulled, we can free the state
9615 * and clear the associated private object now to free
9616 * some memory and avoid a possible use-after-free later.
9619 for (i = 0; i < state->num_private_objs; i++) {
9620 struct drm_private_obj *obj = state->private_objs[i].ptr;
9622 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9623 int j = state->num_private_objs-1;
9625 dm_atomic_destroy_state(obj,
9626 state->private_objs[i].state);
9628 /* If i is not at the end of the array then the
9629 * last element needs to be moved to where i was
9630 * before the array can safely be truncated.
9633 state->private_objs[i] =
9634 state->private_objs[j];
9636 state->private_objs[j].ptr = NULL;
9637 state->private_objs[j].state = NULL;
9638 state->private_objs[j].old_state = NULL;
9639 state->private_objs[j].new_state = NULL;
9641 state->num_private_objs = j;
9647 /* Store the overall update type for use later in atomic check. */
9648 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9649 struct dm_crtc_state *dm_new_crtc_state =
9650 to_dm_crtc_state(new_crtc_state);
9652 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9657 /* Must be success */
9660 trace_amdgpu_dm_atomic_check_finish(state, ret);
9665 if (ret == -EDEADLK)
9666 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9667 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9668 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9670 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9672 trace_amdgpu_dm_atomic_check_finish(state, ret);
9677 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9678 struct amdgpu_dm_connector *amdgpu_dm_connector)
9681 bool capable = false;
9683 if (amdgpu_dm_connector->dc_link &&
9684 dm_helpers_dp_read_dpcd(
9686 amdgpu_dm_connector->dc_link,
9687 DP_DOWN_STREAM_PORT_COUNT,
9689 sizeof(dpcd_data))) {
9690 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
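/*
 * DP_MSA_TIMING_PAR_IGNORED set in the DOWN_STREAM_PORT_COUNT DPCD register
 * means the sink can ignore the MSA timing parameters, which is required
 * for driving it with a variable refresh rate.
 */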
9695 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9699 bool edid_check_required;
9700 struct detailed_timing *timing;
9701 struct detailed_non_pixel *data;
9702 struct detailed_data_monitor_range *range;
9703 struct amdgpu_dm_connector *amdgpu_dm_connector =
9704 to_amdgpu_dm_connector(connector);
9705 struct dm_connector_state *dm_con_state = NULL;
9707 struct drm_device *dev = connector->dev;
9708 struct amdgpu_device *adev = drm_to_adev(dev);
9709 bool freesync_capable = false;
9711 if (!connector->state) {
9712 DRM_ERROR("%s - Connector has no state", __func__);
9717 dm_con_state = to_dm_connector_state(connector->state);
9719 amdgpu_dm_connector->min_vfreq = 0;
9720 amdgpu_dm_connector->max_vfreq = 0;
9721 amdgpu_dm_connector->pixel_clock_mhz = 0;
9726 dm_con_state = to_dm_connector_state(connector->state);
9728 edid_check_required = false;
9729 if (!amdgpu_dm_connector->dc_sink) {
9730 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9733 if (!adev->dm.freesync_module)
9736 * If the EDID is non-zero, restrict freesync support to DP and eDP only
9739 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9740 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9741 edid_check_required = is_dp_capable_without_timing_msa(
9743 amdgpu_dm_connector);
9746 if (edid_check_required == true && (edid->version > 1 ||
9747 (edid->version == 1 && edid->revision > 1))) {
9748 for (i = 0; i < 4; i++) {
9750 timing = &edid->detailed_timings[i];
9751 data = &timing->data.other_data;
9752 range = &data->data.range;
9754 * Check if monitor has continuous frequency mode
9756 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9759 * Check for flag range limits only. If flag == 1 then
9760 * no additional timing information provided.
9761 * Default GTF, GTF Secondary curve and CVT are not supported.
9764 if (range->flags != 1)
9767 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9768 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9769 amdgpu_dm_connector->pixel_clock_mhz =
9770 range->pixel_clock_mhz * 10;
9772 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9773 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9778 if (amdgpu_dm_connector->max_vfreq -
9779 amdgpu_dm_connector->min_vfreq > 10) {
9781 freesync_capable = true;
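/*
 * Only advertise freesync when the panel exposes a usable range: the max
 * refresh rate must exceed the min by more than 10 Hz.
 */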
9787 dm_con_state->freesync_capable = freesync_capable;
9789 if (connector->vrr_capable_property)
9790 drm_connector_set_vrr_capable_property(connector,
9794 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9796 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9798 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9800 if (link->type == dc_connection_none)
9802 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9803 dpcd_data, sizeof(dpcd_data))) {
9804 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
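/*
 * Byte 0 of the PSR capability block reports the supported PSR version;
 * zero means the panel has no PSR support at all.
 */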
9806 if (dpcd_data[0] == 0) {
9807 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9808 link->psr_settings.psr_feature_enabled = false;
9810 link->psr_settings.psr_version = DC_PSR_VERSION_1;
9811 link->psr_settings.psr_feature_enabled = true;
9814 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9819 * amdgpu_dm_link_setup_psr() - configure psr link
9820 * @stream: stream state
9822 * Return: true if success
9824 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9826 struct dc_link *link = NULL;
9827 struct psr_config psr_config = {0};
9828 struct psr_context psr_context = {0};
9834 link = stream->link;
9836 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9838 if (psr_config.psr_version > 0) {
9839 psr_config.psr_exit_link_training_required = 0x1;
9840 psr_config.psr_frame_capture_indication_req = 0;
9841 psr_config.psr_rfb_setup_time = 0x37;
9842 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9843 psr_config.allow_smu_optimizations = 0x0;
9845 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9848 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9854 * amdgpu_dm_psr_enable() - enable psr f/w
9855 * @stream: stream state
9857 * Return: true if success
9859 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9861 struct dc_link *link = stream->link;
9862 unsigned int vsync_rate_hz = 0;
9863 struct dc_static_screen_params params = {0};
9864 /* Calculate the number of static frames before generating an interrupt to enter PSR. */
9867 // Fail-safe default of 2 static frames
9868 unsigned int num_frames_static = 2;
9870 DRM_DEBUG_DRIVER("Enabling psr...\n");
9872 vsync_rate_hz = div64_u64(div64_u64((
9873 stream->timing.pix_clk_100hz * 100),
9874 stream->timing.v_total),
9875 stream->timing.h_total);
9878 * Calculate the number of frames such that at least 30 ms of time has passed.
9881 if (vsync_rate_hz != 0) {
9882 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9883 num_frames_static = (30000 / frame_time_microsec) + 1;
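/*
 * Example: at a 60 Hz vsync rate the frame time is ~16667 us, so
 * 30000 / 16667 + 1 = 2 static frames; at 144 Hz (~6944 us) it becomes
 * 30000 / 6944 + 1 = 5 frames before PSR entry.
 */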
9886 params.triggers.cursor_update = true;
9887 params.triggers.overlay_update = true;
9888 params.triggers.surface_update = true;
9889 params.num_frames = num_frames_static;
9891 dc_stream_set_static_screen_params(link->ctx->dc,
9895 return dc_link_set_psr_allow_active(link, true, false, false);
9899 * amdgpu_dm_psr_disable() - disable psr f/w
9900 * @stream: stream state
9902 * Return: true if success
9904 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9907 DRM_DEBUG_DRIVER("Disabling psr...\n");
9909 return dc_link_set_psr_allow_active(stream->link, false, true, false);
9913 * amdgpu_dm_psr_disable_all() - disable psr f/w
9914 * if psr is enabled on any stream
9916 * Return: true if success
9918 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9920 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9921 return dc_set_psr_allow_active(dm->dc, false);
9924 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9926 struct amdgpu_device *adev = drm_to_adev(dev);
9927 struct dc *dc = adev->dm.dc;
9930 mutex_lock(&adev->dm.dc_lock);
9931 if (dc->current_state) {
9932 for (i = 0; i < dc->current_state->stream_count; ++i)
9933 dc->current_state->streams[i]
9934 ->triggered_crtc_reset.enabled =
9935 adev->dm.force_timing_sync;
9937 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9938 dc_trigger_sync(dc, dc->current_state);
9940 mutex_unlock(&adev->dm.dc_lock);
9943 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9944 uint32_t value, const char *func_name)
9946 #ifdef DM_CHECK_ADDR_0
9948 DC_ERR("invalid register write. address = 0");
9952 cgs_write_register(ctx->cgs_device, address, value);
9953 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9956 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9957 const char *func_name)
9960 #ifdef DM_CHECK_ADDR_0
9962 DC_ERR("invalid register read; address = 0\n");
9967 if (ctx->dmub_srv &&
9968 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9969 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
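/*
 * Register reads while a DMUB register sequence gather is in progress
 * (and burst writes are not allowed) are not expected here.
 */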
9974 value = cgs_read_register(ctx->cgs_device, address);
9976 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);